/**
 * Connects to the distributed system with the given configuration and narrows
 * the result to the internal API type.
 */
protected InternalDistributedSystem connect(Properties props) { return (InternalDistributedSystem) DistributedSystem.connect(props); }
/** * Returns a connection to the distributed system that is appropriate for administration. This * method is for internal use only by the admin API. * * @since GemFire 4.0 */ protected static DistributedSystem connectForAdmin(Properties props) { DistributedSystem existing = getConnection(props); if (existing != null) { return existing; } else { // logger.info("creating new distributed system for admin"); // for (java.util.Enumeration en=props.propertyNames(); en.hasMoreElements(); ) { // String prop=(String)en.nextElement(); // logger.info(prop + "=" + props.getProperty(prop)); // } props.setProperty(CONSERVE_SOCKETS, "true"); // LOG: no longer using the LogWriter that was passed in return connect(props); } }
/**
 * Connects a loner distributed system configured from cachejta.xml, creates the cache, and
 * optionally creates a uniquely named test table.
 *
 * @param className base name for the table; when null or empty no table is created
 * @return the generated table name, or "" when no table was created
 * @throws Exception if system, cache, or table creation fails
 */
public static String init(String className) throws Exception {
  Properties props = new Properties();
  props.setProperty(CACHE_XML_FILE, TestUtil.getResourcePath(CacheUtils.class, "cachejta.xml"));
  props.setProperty(MCAST_PORT, "0"); // loner: no multicast discovery
  String tableName = "";
  ds = DistributedSystem.connect(props);
  cache = CacheFactory.create(ds);
  if (className != null && !className.isEmpty()) {
    // Timestamp suffix keeps table names unique across test runs.
    // (String concatenation replaces the deprecated new Long(...).toString().)
    tableName = className + System.currentTimeMillis();
    createTable(tableName);
  }
  return tableName;
}
/**
 * Creates a "loner" <code>DistributedSystem</code> for this test.
 */
@Before
public void setUp() {
  system = (InternalDistributedSystem) DistributedSystem.connect(getProperties());
}
/**
 * Creates the singleton test cache on a loner distributed system; suitable only for single-VM
 * tests. Subsequent calls return the already-created cache.
 *
 * @return the shared {@code InternalCache} instance
 */
public static synchronized InternalCache createCache() {
  if (cache == null) {
    Properties dsp = new Properties();
    dsp.setProperty(MCAST_PORT, "0");
    dsp.setProperty(LOCATORS, "");
    DistributedSystem sys = DistributedSystem.connect(dsp);
    try {
      cache = (InternalCache) CacheFactory.create(sys);
    } catch (CacheExistsException | RegionExistsException e) {
      // A cache already exists for this system; reuse it instead of failing.
      cache = (InternalCache) CacheFactory.getInstance(sys);
    }
  }
  return cache;
}
// Clear the locator list (loner-style configuration) before connecting.
this.dsProps.setProperty(LOCATORS, ""); InternalDistributedSystem system = (InternalDistributedSystem) DistributedSystem.connect(this.dsProps);
/**
 * Connects a loner distributed system and creates the distributed lock service under test.
 */
@Before
public void setup() {
  Properties properties = new Properties();
  // setProperty is the type-safe Properties API (avoids the raw Hashtable.put).
  properties.setProperty(ConfigurationProperties.LOCATORS, "");
  properties.setProperty(ConfigurationProperties.MCAST_PORT, "0");
  system = DistributedSystem.connect(properties);
  lockService = DistributedLockService.create("Test Lock Service", system);
}
protected void createCache() throws CacheException { Properties p = new Properties(); p.setProperty(MCAST_PORT, "0"); // loner this.cache = (GemFireCacheImpl) CacheFactory.create(DistributedSystem.connect(p)); AttributesFactory<String, String> af = new AttributesFactory<>(); af.setScope(Scope.DISTRIBUTED_NO_ACK); af.setIndexMaintenanceSynchronous(true); this.region = this.cache.createRegion("TXTest", af.create()); this.txMgr = this.cache.getCacheTransactionManager(); }
/**
 * Creates a DiskStoreImpl configured for offline operations (validate/compact/upgrade/modify).
 * A loner distributed system and cache are spun up because a DiskStoreImpl requires a live
 * cache; both are stashed in the static fields offlineDS/offlineCache.
 *
 * NOTE(review): presumably the caller is responsible for disconnecting offlineDS and closing
 * offlineCache afterwards — confirm against callers.
 */
private static DiskStoreImpl createForOffline(String dsName, File[] dsDirs,
    boolean offlineCompacting, boolean offlineValidate, boolean upgradeVersionOnly,
    long maxOplogSize, boolean needsOplogs, boolean offlineModify) throws Exception {
  if (dsDirs == null) {
    // Default to the current working directory when no directories were supplied.
    dsDirs = new File[] {new File("")};
  }
  // need a cache so create a loner ds
  Properties props = new Properties();
  props.setProperty(LOCATORS, "");
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(CACHE_XML_FILE, ""); // suppress any declarative cache configuration
  DistributedSystem ds = DistributedSystem.connect(props);
  offlineDS = ds;
  InternalCache cache = (InternalCache) CacheFactory.create(ds);
  offlineCache = cache;
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setDiskDirs(dsDirs);
  if (offlineCompacting && maxOplogSize != -1L) {
    // -1 means "use the default"; only override when compacting with an explicit size.
    dsf.setMaxOplogSize(maxOplogSize);
  }
  DiskStoreImpl dsi = new DiskStoreImpl(cache, dsName,
      ((DiskStoreFactoryImpl) dsf).getDiskStoreAttributes(), false, null, true,
      upgradeVersionOnly, offlineValidate, offlineCompacting, needsOplogs, offlineModify);
  cache.addDiskStore(dsi);
  return dsi;
}
@SuppressWarnings("deprecation") @Before public void setUp() throws Exception { // System.setProperty("gemfire.stats.debug.debugSampleCollector", "true"); final Properties props = new Properties(); props.setProperty(MCAST_PORT, "0"); props.setProperty(ENABLE_TIME_STATISTICS, "true"); props.setProperty(STATISTIC_SAMPLING_ENABLED, "false"); props.setProperty(STATISTIC_SAMPLE_RATE, "60000"); this.system = (InternalDistributedSystem) DistributedSystem.connect(props); assertNotNull(this.system.getStatSampler()); assertNotNull(this.system.getStatSampler().waitForSampleCollector(TIMEOUT)); new CacheFactory().create(); init(); sample(); }
// Connect with the supplied environment and keep the internal-typed handle for later use.
this.myDs = (InternalDistributedSystem) DistributedSystem.connect(connectEnv);
/**
 * Demo driver: connects to a distributed system, populates a region with serializable values
 * (User, Company, Employee), then stays alive for a minute so peers can read them.
 */
public static void main(String[] args) throws Throwable {
  DistributedSystem system = DistributedSystem.connect(new Properties());
  Cache cache = CacheFactory.create(system);
  Region region = cache.createRegion("DataSerializable", new AttributesFactory().create());
  region.put("User", new User("Fred", 42));
  // Constructing the serializer registers it with the serialization framework.
  new CompanySerializer();
  Company company = new Company("My Company", new Address());
  region.put("Company", company);
  region.put("Employee", new Employee(43, "Bob", new Date(), company));
  Thread.sleep(60 * 1000);
}
/**
 * Creates a "loner" <code>DistributedSystem</code> for this test.
 */
@Before
public void setUp() throws Exception {
  Properties config = defineProperties();
  this.system = DistributedSystem.connect(config);
}
/**
 * Connects a distributed system, creates the cache, and prepares Portfolio-constrained
 * region attributes plus the query service used by the tests.
 */
private void setUp() throws CacheException {
  this.ds = DistributedSystem.connect(new Properties());
  this.cache = CacheFactory.create(this.ds);
  AttributesFactory factory = new AttributesFactory();
  factory.setValueConstraint(Portfolio.class);
  this.regionAttributes = factory.create();
  this.qs = this.cache.getQueryService();
}
// NOTE(review): fragment — connect runs inside try/finally so the security configuration is
// always removed afterwards, even when connect throws.
this.cacheConfig.getPostProcessor()); try { ds = DistributedSystem.connect(this.dsProps); } finally { SecurityConfig.remove();
/** * Byte arrays */ public static void main4(String[] args) throws Exception { DistributedSystem system = DistributedSystem.connect(new java.util.Properties()); Cache cache = CacheFactory.create(system); AttributesFactory factory = new AttributesFactory(); factory.setEvictionAttributes(EvictionAttributes.createLRUMemoryAttributes(2, (ObjectSizer) null, EvictionAction.OVERFLOW_TO_DISK)); LocalRegion region = (LocalRegion) cache.createRegion("TestDiskRegion", factory.create()); // DiskRegion dr = region.getDiskRegion(); // DiskRegionStats diskStats = dr.getStats(); // EvictionStatistics lruStats = getLRUStats(region); // int total; // for (total = 0; lruStats.getEvictions() > 100; total++) { // region.put(new Integer(total), String.valueOf(total).getBytes()); // } // for (int i = 0; i < total; i++) { // byte[] bytes = (byte[]) region.get(new Integer(i)); // Assert.assertTrue((new String(bytes)).equals(String.valueOf(i))); // } for (int i = 0; i < 100000; i++) { System.out.println(i); region.put(String.valueOf(i), String.valueOf(i).getBytes()); } }
public static void main1(String[] args) throws Exception { DistributedSystem system = DistributedSystem.connect(new java.util.Properties()); Cache cache = CacheFactory.create(system); AttributesFactory factory = new AttributesFactory(); factory.setEvictionAttributes(EvictionAttributes.createLRUMemoryAttributes(2, (ObjectSizer) null, EvictionAction.OVERFLOW_TO_DISK)); factory.setCacheListener(new CacheListenerAdapter() { @Override public void afterUpdate(EntryEvent event) { System.out.println("UPDATE: " + event.getKey() + " -> (" + event.getOldValue() + " -> " + event.getNewValue() + ")"); } }); LocalRegion region = (LocalRegion) cache.createRegion("TestDiskRegion", factory.create()); DiskRegion dr = region.getDiskRegion(); DiskRegionStats diskStats = dr.getStats(); EvictionCounters lruStats = getLRUStats(region); BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); System.out.println("Hit enter to perform action"); for (int i = 0; true; i++) { br.readLine(); // Thread.sleep(500); Object key = new Integer(i); Object value = new byte[200000]; region.put(key, value); System.out.println(key + " -> " + value + " evictions = " + lruStats.getEvictions() + ", writes = " + diskStats.getWrites()); } }
// Enable time statistics and sampling before connecting so the stat sampler starts active.
dsProps.setProperty(ConfigurationProperties.ENABLE_TIME_STATISTICS, "true"); dsProps.setProperty(ConfigurationProperties.STATISTIC_SAMPLING_ENABLED, "true"); DistributedSystem ds = DistributedSystem.connect(dsProps); final LogWriter logger = ds.getLogWriter();
// NOTE(review): this method continues beyond the visible chunk — body is incomplete here.
public static void main(String[] args) throws Exception { DistributedSystem system = DistributedSystem.connect(new java.util.Properties()); Cache cache = CacheFactory.create(system); AttributesFactory factory = new AttributesFactory();
/** * Filling up the region with keys and values */ public static void main5(String[] args) throws Exception { DistributedSystem system = DistributedSystem.connect(new java.util.Properties()); Cache cache = CacheFactory.create(system); AttributesFactory factory = new AttributesFactory(); factory.setEvictionAttributes(EvictionAttributes.createLRUMemoryAttributes(2, (ObjectSizer) null, EvictionAction.OVERFLOW_TO_DISK)); LocalRegion region = (LocalRegion) cache.createRegion("TestDiskRegion", factory.create()); // DiskRegion dr = region.getDiskRegion(); // DiskRegionStats diskStats = dr.getStats(); EvictionCounters lruStats = getLRUStats(region); for (int i = 0; i < 10000; i++) { int[] array = new int[1000]; array[0] = i; try { region.put(array, new Integer(i)); } catch (IllegalStateException ex) { System.out.println("Ran out of space: " + ex); return; } } String s = "Limit is " + lruStats.getLimit() + " evictions are " + lruStats.getEvictions(); throw new RuntimeException(s); }