/**
 * Sets the HDFSStore name attribute.
 * This causes the region to belong to the HDFSStore.
 * @param name the name of the hdfsstore
 * @return a reference to this RegionFactory object
 *
 * @see AttributesFactory#setHDFSStoreName
 */
public RegionFactory<K,V> setHDFSStoreName(String name) {
  this.attrsFactory.setHDFSStoreName(name);
  return this;
}
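// A minimal usage sketch of the factory method above, assuming an embedded
// cache. The store name "myHdfsStore", the home directory, and the region
// name "myRegion" are illustrative assumptions, not taken from the source.
Cache cache = new CacheFactory().create();
HDFSStoreFactory hsf = cache.createHDFSStoreFactory();
hsf.setHomeDir("./myHdfsStore");              // assumed HDFS home directory
hsf.create("myHdfsStore");                    // register the store by name
Region<Integer, String> region = cache
    .<Integer, String>createRegionFactory()
    .setDataPolicy(DataPolicy.HDFS_PARTITION) // HDFS-backed partitioned data
    .setHDFSStoreName("myHdfsStore")          // tie the region to the store
    .create("myRegion");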
final public void persistenceHDFSDefinition(AttributesFactory afact,
    boolean[] repPartPersFlags) throws ParseException, StandardException {
  String hdfsStoreName, prevHDFSStore = null;
  jj_consume_token(HDFSSTORE);
  jj_consume_token(LEFT_PAREN);
  hdfsStoreName = identifier(Limits.MAX_IDENTIFIER_LENGTH, true);
  jj_consume_token(RIGHT_PAREN);
  repPartPersFlags[5] = true;
  if (hdfsStoreName != null) {
    if (prevHDFSStore != null) {
      {if (true) throw StandardException.newException(SQLState.LANG_SYNTAX_ERROR,
          "HDFS Store '" + prevHDFSStore + "' already set");}
    }
    prevHDFSStore = hdfsStoreName;
    afact.setHDFSStoreName(hdfsStoreName);
  }
  switch (jj_nt.kind) {
  case WRITEONLY:{
    jj_consume_token(WRITEONLY);
    afact.setHDFSWriteOnly(true);
    break;
    }
  default:
    jj_la1[313] = jj_gen;
    ;
  }
}
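// From the tokens this generated parser rule consumes, it accepts a DDL
// clause of the shape HDFSSTORE (<store-name>) [WRITEONLY]. A hypothetical
// JDBC sketch of issuing such a statement follows; the connection URL, the
// table definition, and the store name are assumptions, not from the source.
Connection conn = DriverManager.getConnection("jdbc:gemfirexd:");
try (Statement st = conn.createStatement()) {
  // WRITEONLY takes the afact.setHDFSWriteOnly(true) branch above.
  st.execute("CREATE TABLE trades (id INT PRIMARY KEY, qty INT) "
      + "PARTITION BY PRIMARY KEY "
      + "HDFSSTORE (myHdfsStore) WRITEONLY");
}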
@Override
public Object call() throws Exception {
  HDFSStoreFactory storefactory = getCache().createHDFSStoreFactory();
  storefactory.setHomeDir("./" + regionName);
  storefactory.create(storeName);
  AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
  af.setDataPolicy(DataPolicy.HDFS_PARTITION);
  af.setHDFSStoreName(storeName);
  Region r = getCache().createRegionFactory(af.create()).create(regionName);
  r.put("key1", "value1");
  return null;
}
};
f.setHDFSStoreName(rd.getHDFSStoreDescription().getName());
f.setHDFSStoreName(this.getHDFSStoreDescription().getName());
afact.setHDFSStoreName(null);
afact.setDataPolicy(DataPolicy.PARTITION);
hqf.setMaximumQueueMemory(3);
hqf.setBatchTimeInterval(batchInterval);
af.setHDFSStoreName(uniqueName);
public Object call() throws Exception {
  AttributesFactory af = new AttributesFactory();
  af.setDataPolicy(DataPolicy.HDFS_PARTITION);
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setTotalNumBuckets(totalnumOfBuckets);
  paf.setRedundantCopies(1);
  af.setPartitionAttributes(paf.create());
  HDFSEventQueueAttributesFactory hqf = new HDFSEventQueueAttributesFactory();
  hqf.setBatchSizeMB(batchSize);
  hqf.setBatchTimeInterval(batchInterval);
  hqf.setPersistent(false);
  hqf.setMaximumQueueMemory(1);
  HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
  hsf.setHDFSEventQueueAttributes(hqf.create());
  hsf.setHomeDir(folderPath);
  hsf.create(uniqueName);
  af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(
      maximumEntries, EvictionAction.LOCAL_DESTROY));
  af.setHDFSWriteOnly(false);
  af.setHDFSStoreName(uniqueName);
  createRootRegion(uniqueName, af.create());
  return 0;
}
};
@Override
public Object call() throws Exception {
  HDFSStoreFactory storefactory = getCache().createHDFSStoreFactory();
  storefactory.setHomeDir("./" + regionName);
  storefactory.create(storeName);
  // DataPolicy PARTITION with localMaxMemory 0 cannot be created
  AttributesFactory<Integer, String> af = new AttributesFactory<Integer, String>();
  af.setDataPolicy(DataPolicy.PARTITION);
  PartitionAttributesFactory<Integer, String> paf =
      new PartitionAttributesFactory<Integer, String>();
  paf.setLocalMaxMemory(0);
  af.setPartitionAttributes(paf.create());
  // DataPolicy PARTITION with localMaxMemory 0 can be created if hdfsStoreName is set
  af.setHDFSStoreName(storeName);
  // No need to check with different storeNames (can never be done in GemFireXD)
  Region r = getCache().createRegionFactory(af.create()).create(regionName);
  r.localDestroyRegion();
  // DataPolicy HDFS_PARTITION with localMaxMemory 0 can be created
  af = new AttributesFactory<Integer, String>();
  af.setDataPolicy(DataPolicy.HDFS_PARTITION);
  af.setPartitionAttributes(paf.create());
  getCache().createRegionFactory(af.create()).create(regionName);
  return null;
}
};
public Object call() throws Exception {
  AttributesFactory af = new AttributesFactory();
  af.setDataPolicy(DataPolicy.HDFS_PARTITION);
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setTotalNumBuckets(totalnumOfBuckets);
  paf.setRedundantCopies(1);
  af.setPartitionAttributes(paf.create());
  HDFSEventQueueAttributesFactory hqf = new HDFSEventQueueAttributesFactory();
  hqf.setBatchSizeMB(batchSize);
  hqf.setBatchTimeInterval(batchInterval);
  hqf.setPersistent(true);
  hqf.setDiskStoreName(uniqueName + vm.getPid());
  HDFSStoreFactory hsf = getCache().createHDFSStoreFactory();
  hsf.setHomeDir(folderPath);
  hsf.setHDFSEventQueueAttributes(hqf.create());
  DiskStore ds = getCache().createDiskStoreFactory().create(uniqueName + vm.getPid());
  af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(
      maximumEntries, EvictionAction.LOCAL_DESTROY));
  af.setHDFSStoreName(uniqueName);
  af.setHDFSWriteOnly(false);
  hsf.create(uniqueName);
  createRootRegion(uniqueName, af.create());
  return 0;
}
};
paf.setRedundantCopies(1);
af.setHDFSStoreName(uniqueName);
af.setPartitionAttributes(paf.create());
af.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(
    maximumEntries, EvictionAction.LOCAL_DESTROY));
} else {
  afact.setDataPolicy(ra.getDataPolicy());
  afact.setHDFSStoreName(ra.getHDFSStoreName());
  afact.setHDFSWriteOnly(ra.getHDFSWriteOnly());