/** * Given a region path, get the associated hdfs store. */ private static ArrayList<HDFSStoreImpl> getHDFSStore(String regionPath) { ArrayList<HDFSStoreImpl> destinationhdfsStores = new ArrayList<HDFSStoreImpl>(); LocalRegion region = Misc.getGemFireCache().getRegionByPath(regionPath, false); if (region == null) {// no region associated return destinationhdfsStores; } if (region.getHDFSStoreName() == null) return destinationhdfsStores; HDFSStoreImpl hdfsStore = Misc.getGemFireCache().findHDFSStore(region.getHDFSStoreName()); if (hdfsStore != null) destinationhdfsStores.add(hdfsStore); return destinationhdfsStores; }
/**
 * Given a region path, get the associated hdfs store.
 *
 * @param regionPath full path of the region to look up
 * @return a list containing the single HDFS store backing the region, or an
 *         empty list when the region does not exist or is not HDFS-backed
 */
private static ArrayList<HDFSStoreImpl> getHDFSStore(String regionPath) {
  ArrayList<HDFSStoreImpl> destinationhdfsStores = new ArrayList<HDFSStoreImpl>();
  LocalRegion region = Misc.getGemFireCache().getRegionByPath(regionPath, false);
  if (region == null) {// no region associated
    return destinationhdfsStores;
  }
  // Region exists but is not configured for HDFS persistence.
  if (region.getHDFSStoreName() == null)
    return destinationhdfsStores;
  // Store name is set; resolve it against the cache. findHDFSStore may still
  // return null (e.g. store not yet created on this member), hence the guard.
  HDFSStoreImpl hdfsStore = Misc.getGemFireCache().findHDFSStore(region.getHDFSStoreName());
  if (hdfsStore != null)
    destinationhdfsStores.add(hdfsStore);
  return destinationhdfsStores;
}
/** * Given a region path, get the associated hdfs store. */ private static ArrayList<HDFSStoreImpl> getHDFSStore(String regionPath) { ArrayList<HDFSStoreImpl> destinationhdfsStores = new ArrayList<HDFSStoreImpl>(); LocalRegion region = Misc.getGemFireCache().getRegionByPath(regionPath, false); if (region == null) {// no region associated return destinationhdfsStores; } if (region.getHDFSStoreName() == null) return destinationhdfsStores; HDFSStoreImpl hdfsStore = Misc.getGemFireCache().findHDFSStore(region.getHDFSStoreName()); if (hdfsStore != null) destinationhdfsStores.add(hdfsStore); return destinationhdfsStores; }
/**
 * Creates and registers a new HDFS store with the given name after validating
 * the held configuration.
 *
 * @param name unique name for the new store; must not be null
 * @return the newly registered store, or null when the underlying cache is not
 *         a GemFireCacheImpl
 * @throws GemFireConfigException if {@code name} is null
 * @throws StoreExistsException if a store with this name is already registered
 */
@Override
public HDFSStore create(String name) {
  if (name == null) {
    throw new GemFireConfigException("HDFS store name not provided");
  }
  // Validate before taking the lock; throws on a bad configuration.
  this.configHolder.validate();
  HDFSStore created = null;
  synchronized (this) {
    if (this.cache instanceof GemFireCacheImpl) {
      GemFireCacheImpl cacheImpl = (GemFireCacheImpl) this.cache;
      // Names must be unique within the cache.
      if (cacheImpl.findHDFSStore(name) != null) {
        throw new StoreExistsException(name);
      }
      HDFSStoreImpl store = new HDFSStoreImpl(name, this.configHolder);
      cacheImpl.addHDFSStore(store);
      created = store;
    }
  }
  return created;
}
/**
 * Creates and registers a new HDFS store with the given name, attaching a
 * DDL hoplog organizer to it before registration.
 *
 * NOTE(review): unlike the sibling create() variant, this one does not call
 * configHolder.validate() — confirm whether that is intentional.
 *
 * @param name unique name for the new store; must not be null
 * @return the newly registered store, or null when the underlying cache is not
 *         a GemFireCacheImpl
 * @throws GemFireConfigException if {@code name} is null
 * @throws StoreExistsException if a store with this name is already registered
 */
@Override
public HDFSStore create(String name) {
  if (name == null) {
    throw new GemFireConfigException("HDFS store name not provided");
  }
  HDFSStore created = null;
  synchronized (this) {
    if (this.cache instanceof GemFireCacheImpl) {
      GemFireCacheImpl cacheImpl = (GemFireCacheImpl) this.cache;
      // Names must be unique within the cache.
      if (cacheImpl.findHDFSStore(name) != null) {
        throw new StoreExistsException(name);
      }
      HDFSStoreImpl store = new HDFSStoreImpl(name, this.configHolder);
      // Wire up the DDL hoplog organizer before publishing the store.
      DDLHoplogOrganizer organizer = new DDLHoplogOrganizer(store);
      store.setDDLHoplogOrganizer(organizer);
      cacheImpl.addHDFSStore(store);
      created = store;
    }
  }
  return created;
}
@Override public void doMe(Transaction xact, LogInstant instant, LimitObjectInput in) throws StandardException, IOException { //Do not remove entry in SYS.SYSHDFSSTORES VTI, see #48461 //The VTI entry will be removed later by the other operation in the undo list //see MemLogger.logAndDo and undo() //This HDFSStoreDropOperation is generated by HDFSStoreCreateOperation.generateUndo() //This is different from DropHDFSStoreConstantAction.executeConstantAction(), //which has to remove the VTI entry by itself this.storeName = SharedUtils.SQLToUpperCase(storeName); HDFSStoreImpl store = (HDFSStoreImpl) Misc.getGemFireCache().findHDFSStore( storeName); try { if (store != null) { Misc.getGemFireCache().removeHDFSStore(store); } } catch (Exception e) { SanityManager.DEBUG_PRINT(GfxdConstants.TRACE_CONGLOM, "DropHDFSStore :: got Exception", e); } SanityManager.DEBUG_PRINT(GfxdConstants.TRACE_CONGLOM, "HDFSStoreDropOperation :: removed HDFSStore " + storeName + "."); }
HDFSStoreImpl hdfsStore = Misc.getGemFireCache().findHDFSStore(ddl.getHDFSStoreName()); if (hdfsStore != null) { destinationhdfsStores = new ArrayList<HDFSStoreImpl>();
/**
 * Function entry point for 'describe hdfs-store': looks up the named HDFS
 * store on this member and returns its configuration, or an error when the
 * store is absent.
 *
 * @param context function context; its argument is the store name (String)
 */
public void execute(final FunctionContext context) {
  try {
    Cache cache = getCache();
    final DistributedMember member = getDistributedMemberId(cache);
    if (cache instanceof GemFireCacheImpl) {
      GemFireCacheImpl gemfireCache = (GemFireCacheImpl) cache;
      final String storeName = (String) context.getArguments();
      final String memberName = member.getName();
      HDFSStoreImpl store = gemfireCache.findHDFSStore(storeName);
      if (store == null) {
        // Report "not found" as a typed exception so the caller can render
        // a meaningful message.
        context.getResultSender().sendException(
            new HDFSStoreNotFoundException(
                String.format("A hdfs store with name (%1$s) was not found on member (%2$s).",
                    storeName, memberName)));
      } else {
        // Ship a detached, serializable snapshot of the store's config.
        HDFSStoreConfigHolder holder = new HDFSStoreConfigHolder(store);
        context.getResultSender().lastResult(holder);
      }
    }
  } catch (Exception e) {
    logger.error("Error occurred while executing 'describe hdfs-store': {}!",
        e.getMessage(), e);
    context.getResultSender().sendException(e);
  }
}
HDFSStoreImpl hdfsStore = Misc.getGemFireCache().findHDFSStore(ddl.getHDFSStoreName()); if (hdfsStore != null) { destinationhdfsStores = new ArrayList<HDFSStoreImpl>();
HDFSStoreImpl hdfsStore = Misc.getGemFireCache().findHDFSStore(ddl.getHDFSStoreName()); if (hdfsStore != null) { destinationhdfsStores = new ArrayList<HDFSStoreImpl>();
@Override public void doMe(Transaction xact, LogInstant instant, LimitObjectInput in) throws StandardException, IOException { //Do not remove entry in SYS.SYSHDFSSTORES VTI, see #48461 //The VTI entry will be removed later by the other operation in the undo list //see MemLogger.logAndDo and undo() //This HDFSStoreDropOperation is generated by HDFSStoreCreateOperation.generateUndo() //This is different from DropHDFSStoreConstantAction.executeConstantAction(), //which has to remove the VTI entry by itself this.storeName = SharedUtils.SQLToUpperCase(storeName); HDFSStoreImpl store = (HDFSStoreImpl) Misc.getGemFireCache().findHDFSStore( storeName); try { if (store != null) { Misc.getGemFireCache().removeHDFSStore(store); } } catch (Exception e) { SanityManager.DEBUG_PRINT(GfxdConstants.TRACE_CONGLOM, "DropHDFSStore :: got Exception", e); } SanityManager.DEBUG_PRINT(GfxdConstants.TRACE_CONGLOM, "HDFSStoreDropOperation :: removed HDFSStore " + storeName + "."); }
/**
 * Undo operation: removes the in-memory HDFS store that a corresponding
 * HDFSStoreCreateOperation registered.
 *
 * @param xact the transaction this log operation belongs to
 * @param instant log instant of this operation
 * @param in optional serialized state (unused here)
 * @throws StandardException on store-layer failures
 * @throws IOException on log read failures
 */
@Override
public void doMe(Transaction xact, LogInstant instant, LimitObjectInput in)
    throws StandardException, IOException {
  //Do not remove entry in SYS.SYSHDFSSTORES VTI, see #48461
  //The VTI entry will be removed later by the other operation in the undo list
  //see MemLogger.logAndDo and undo()
  //This HDFSStoreDropOperation is generated by HDFSStoreCreateOperation.generateUndo()
  //This is different from DropHDFSStoreConstantAction.executeConstantAction(),
  //which has to remove the VTI entry by itself
  // Store names are stored upper-cased; normalize before lookup.
  this.storeName = SharedUtils.SQLToUpperCase(storeName);
  HDFSStoreImpl store = (HDFSStoreImpl) Misc.getGemFireCache().findHDFSStore(
      storeName);
  try {
    if (store != null) {
      Misc.getGemFireCache().removeHDFSStore(store);
    }
  } catch (Exception e) {
    // Best-effort: removal failures during undo are traced, not rethrown.
    SanityManager.DEBUG_PRINT(GfxdConstants.TRACE_CONGLOM,
        "DropHDFSStore :: got Exception", e);
  }
  // NOTE(review): this trace prints even when the store was absent or removal
  // failed — confirm whether the unconditional message is intended.
  SanityManager.DEBUG_PRINT(GfxdConstants.TRACE_CONGLOM,
      "HDFSStoreDropOperation :: removed HDFSStore " + storeName + ".");
}
/**
 * Creates the default async event queue used for HDFS persistence of the given
 * region, when the region is HDFS-backed, hosts data locally, and the queue
 * does not already exist.
 *
 * @param regionPath path of the region being created
 * @param regionAttributes attributes of that region
 * @param cache the cache in which to create the queue
 * @throws IllegalStateException if the configured HDFS store cannot be found
 */
public static void createAndAddAsyncQueue(String regionPath,
    RegionAttributes regionAttributes, Cache cache) {
  // Only HDFS-backed regions need the queue.
  if (!regionAttributes.getDataPolicy().withHDFS()) {
    return;
  }
  String leaderRegionPath = getLeaderRegionPath(regionPath, regionAttributes, cache);
  String queueName = HDFSStoreFactoryImpl.getEventQueueName(leaderRegionPath);
  // Queue already created (e.g. by a colocated region): nothing to do.
  if (cache.getAsyncEventQueue(queueName) != null) {
    return;
  }
  // Only members that actually host data (localMaxMemory != 0) for an
  // HDFS-store-configured partitioned region create the queue.
  boolean hostsHdfsData = regionAttributes.getHDFSStoreName() != null
      && regionAttributes.getPartitionAttributes() != null
      && regionAttributes.getPartitionAttributes().getLocalMaxMemory() != 0;
  if (!hostsHdfsData) {
    return;
  }
  HDFSStore store = ((GemFireCacheImpl) cache)
      .findHDFSStore(regionAttributes.getHDFSStoreName());
  if (store == null) {
    throw new IllegalStateException(
        LocalizedStrings.HOPLOG_HDFS_STORE_NOT_FOUND
            .toLocalizedString(regionAttributes.getHDFSStoreName()));
  }
  HDFSIntegrationUtil.createAsyncQueueForHDFS(cache, leaderRegionPath,
      regionAttributes.getHDFSWriteOnly(), store);
}
HDFSStoreImpl hsi = Misc.getGemFireCache().findHDFSStore(storeName); if (hsi != null)
/**
 * Validates that the config defaults are set even with minimum XML configuration.
 */
public void testHdfsStoreConfMinParse() {
  // Parse a cache.xml containing only the mandatory hdfs-store attributes.
  this.c.loadCacheXml(new ByteArrayInputStream(XML_MIN_CONF.getBytes()));
  HDFSStoreImpl parsedStore = ((GemFireCacheImpl) this.c).findHDFSStore("store");
  assertEquals("namenode url mismatch.", "url", parsedStore.getNameNodeURL());
  assertEquals("home-dir mismatch.", "gemfire", parsedStore.getHomeDir());
  // Compaction config was not specified in the XML; every value below must
  // come from the documented defaults.
  HDFSCompactionConfig defaults = parsedStore.getHDFSCompactionConfig();
  assertNotNull("compaction conf should have initialized to default", defaults);
  assertEquals("compaction strategy mismatch.", "size-oriented",
      defaults.getCompactionStrategy());
  assertTrue("compaction auto-compact mismatch.", defaults.getAutoCompaction());
  assertTrue("compaction auto-major-compact mismatch.",
      defaults.getAutoMajorCompaction());
  assertEquals("compaction max-input-file-size mismatch.", 512,
      defaults.getMaxInputFileSizeMB());
  assertEquals("compaction min-input-file-count.", 4,
      defaults.getMinInputFileCount());
  assertEquals("compaction max-iteration-size.", 10,
      defaults.getMaxInputFileCount());
  assertEquals("compaction max-concurrency", 10, defaults.getMaxThreads());
  assertEquals("compaction max-major-concurrency", 2,
      defaults.getMajorCompactionMaxThreads());
  assertEquals("compaction major-interval", 720,
      defaults.getMajorCompactionIntervalMins());
  assertEquals("compaction cleanup-interval", 30,
      defaults.getOldFilesCleanupIntervalMins());
}
HDFSStoreImpl hsi = Misc.getGemFireCache().findHDFSStore(storeName); if (hsi != null)
HDFSStoreImpl hsi = Misc.getGemFireCache().findHDFSStore(storeName); if (hsi != null)
/**
 * Validates if hdfs store conf is getting complety and correctly parsed.
 */
public void testHdfsStoreConfFullParsing() {
  // Build a cache.xml specifying every hdfs-store attribute explicitly.
  String conf = createStoreConf(null, "123");
  this.c.loadCacheXml(new ByteArrayInputStream(conf.getBytes()));
  HDFSStoreImpl store = ((GemFireCacheImpl) this.c).findHDFSStore("store");
  assertEquals("namenode url mismatch.", "url", store.getNameNodeURL());
  assertEquals("home-dir mismatch.", "dir", store.getHomeDir());
  assertEquals("hdfs-client-config-file mismatch.", "client",
      store.getHDFSClientConfigFile());
  assertEquals("block-cache-size mismatch.", 24.5f, store.getBlockCacheSize());
  HDFSCompactionConfig compactConf = store.getHDFSCompactionConfig();
  assertEquals("compaction strategy mismatch.", "size-oriented",
      compactConf.getCompactionStrategy());
  assertFalse("compaction auto-compact mismatch.", compactConf.getAutoCompaction());
  assertTrue("compaction auto-major-compact mismatch.",
      compactConf.getAutoMajorCompaction());
  assertEquals("compaction max-input-file-size mismatch.", 123,
      compactConf.getMaxInputFileSizeMB());
  assertEquals("compaction min-input-file-count.", 9,
      compactConf.getMinInputFileCount());
  assertEquals("compaction max-input-file-count.", 1234,
      compactConf.getMaxInputFileCount());
  assertEquals("compaction max-concurrency", 23, compactConf.getMaxThreads());
  assertEquals("compaction max-major-concurrency", 27,
      compactConf.getMajorCompactionMaxThreads());
  assertEquals("compaction major-interval", 781,
      compactConf.getMajorCompactionIntervalMins());
  // Fix: message previously said "compaction major-interval" (copy-paste from
  // the line above) while asserting the cleanup interval; corrected so a
  // failure here reports the right setting.
  assertEquals("compaction cleanup-interval", 711,
      compactConf.getOldFilesCleanupIntervalMins());
}
HDFSStoreImpl hdfsStore = findHDFSStore(attrs.getHDFSStoreName()); if (attrs.getPartitionAttributes().getLocalMaxMemory() != 0 && hdfsStore == null) {
HDFSStoreImpl store = (HDFSStoreImpl) Misc.getGemFireCache().findHDFSStore( hdfsStoreName); SanityManager.DEBUG_PRINT(GfxdConstants.TRACE_CONGLOM,