/**
 * Evicts all cached blocks belonging to the given region's store files.
 *
 * <p>Walks every store of the region and, for each store file, asks the block
 * cache to drop all blocks keyed by that HFile's name, accumulating the count.
 *
 * @param region the region whose cached blocks should be evicted
 * @return stats carrying the total number of evicted blocks
 */
public CacheEvictionStats clearRegionBlockCache(Region region) {
  long totalEvicted = 0;
  for (Store store : region.getStores()) {
    for (StoreFile storeFile : store.getStorefiles()) {
      // Blocks are keyed in the cache by the HFile's name, not its full path.
      totalEvicted += blockCache.evictBlocksByHfileName(storeFile.getPath().getName());
    }
  }
  return CacheEvictionStats.builder()
      .withEvictedBlocks(totalEvicted)
      .build();
}
// NOTE(review): JSP-generated HTML rendering fragment — iterates the region's stores,
// emitting a column-family header, memstore heap size in MB, and the opening of a
// store-file table. The inner for-loop over storeFiles is TRUNCATED here (its body and
// the enclosing loop's close are outside this view), so this span cannot be safely
// restructured in isolation.
List<? extends Store> stores = region.getStores(); for (Store store : stores) { String cf = store.getColumnFamilyName(); Collection<? extends StoreFile> storeFiles = store.getStorefiles(); out.write("\n\n <h3>Column Family: "); out.print( cf ); out.write("</h3>\n\n <h4>Memstore size (MB): "); out.print( (int) (store.getMemStoreSize().getHeapSize() / 1024 / 1024) ); out.write("</h4>\n\n <h4>Store Files</h4>\n\n <table class=\"table table-striped\">\n <tr>\n <th>Store File</th>\n <th>Size (MB)</th>\n <th>Modification time</th>\n </tr>\n "); for(StoreFile sf : storeFiles) {
/**
 * Coprocessor hook invoked before a compaction scan starts.
 *
 * <p>Delegates to {@code wrap}, passing the column family name of the store being
 * compacted so the wrapper can be family-aware. The scanner returned by {@code wrap}
 * replaces the one the region server would otherwise use.
 */
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  byte[] familyName = store.getColumnFamilyDescriptor().getName();
  return wrap(familyName, scanner);
}
/**
 * Coprocessor RPC endpoint: re-reads the store file list for every store of this
 * region so that HFiles added out-of-band (e.g. bulk-loaded on another replica)
 * become visible.
 *
 * <p>Fixes over the original: the debug log line is now built only when debug
 * logging is enabled (the unconditional string concatenation was paid on every
 * call regardless of log level), and the message's missing separators around
 * "class:" are repaired.
 *
 * <p>On IOException the error is logged and attached to the controller; the
 * callback is always invoked with the default response so the client is not
 * left hanging.
 */
@Override
public void refreshHFiles(RpcController controller,
    RefreshHFilesProtos.RefreshHFilesRequest request,
    RpcCallback<RefreshHFilesProtos.RefreshHFilesResponse> done) {
  try {
    for (Store store : env.getRegion().getStores()) {
      if (LOG.isDebugEnabled()) {
        // Guard avoids concatenation cost when debug logging is off.
        LOG.debug("Refreshing HFiles for region: "
            + store.getRegionInfo().getRegionNameAsString()
            + " and store: " + store.getColumnFamilyName()
            + " class: " + store.getClass());
      }
      store.refreshStoreFiles();
    }
  } catch (IOException ioe) {
    LOG.error("Exception while trying to refresh store files: ", ioe);
    CoprocessorRpcUtils.setControllerException(controller, ioe);
  }
  // Always complete the RPC, even on failure, so the caller is not blocked.
  done.run(RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance());
}
// NOTE(review): fragment from a store-busyness check — when either the current parallel
// put count or the prepare-put count exceeds its configured threshold, the store's
// column family name is appended to a comma-separated "tooBusyStore" list and a trace
// line is emitted. The enclosing if-block does not close within this view, so only
// comments are added here.
if (store.getCurrentParallelPutCount() > this.parallelPutToStoreThreadLimit || preparePutCount > this.parallelPreparePutToStoreThreadLimit) { tooBusyStore = (tooBusyStore == null ? store.getColumnFamilyName() : tooBusyStore + "," + store.getColumnFamilyName()); LOG.trace(store.getColumnFamilyName() + ": preparePutCount=" + preparePutCount + "; currentParallelPutCount=" + store.getCurrentParallelPutCount());
/**
 * Coprocessor hook that replaces the flush scanner with one filtered through
 * {@code PayloadDataFilter}.
 *
 * <p>A fresh {@link Scan} carrying the filter is built, then a {@code StoreScanner}
 * is opened over just the memstore scanner. NOTE(review): the scan type passed is
 * COMPACT_DROP_DELETES even though this is a flush hook — presumably intentional to
 * drop deletes at flush time; confirm against the coprocessor's design notes.
 */
@Override
public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  LOG.info("preFlush, filter using PayloadDataFilter");
  PayloadDataFilter payloadFilter = new PayloadDataFilter(c.getEnvironment(),
      System.currentTimeMillis(), prefixLength, topicMetadataCache);
  Scan filteredScan = new Scan();
  filteredScan.setFilter(payloadFilter);
  return new StoreScanner(store, store.getScanInfo(), filteredScan,
      Collections.singletonList(memstoreScanner), ScanType.COMPACT_DROP_DELETES,
      store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
/**
 * Coprocessor hook invoked before a memstore flush.
 *
 * <p>Wraps the flush scanner in an {@code IncrementSummingScanner} configured with
 * this column family's compaction bound and oldest visible timestamp, using
 * COMPACT_RETAIN_DELETES so deletes survive the flush.
 */
@Override
public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
    InternalScanner scanner) throws IOException {
  byte[] familyName = store.getFamily().getName();
  return new IncrementSummingScanner(region, IncrementHandlerState.BATCH_UNLIMITED, scanner,
      ScanType.COMPACT_RETAIN_DELETES, state.getCompactionBound(familyName),
      state.getOldestVisibleTimestamp(familyName));
}
/**
 * Derives a possibly-adjusted {@link ScanInfo} from data published in ZooKeeper.
 *
 * <p>Returns {@code null} (meaning "use the store's default") when no ZK data is
 * present or when the store's TTL is unlimited. Otherwise the effective TTL is the
 * larger of the store's configured TTL and the time elapsed since the timestamp
 * stored in ZK.
 *
 * @return an adjusted ScanInfo, or {@code null} to keep the default behavior
 */
protected ScanInfo getScanInfo(Store store, RegionCoprocessorEnvironment e) {
  ZKWatcher watcher = (ZKWatcher) e.getSharedData().get(zkkey);
  byte[] zkData = watcher.getData();
  if (zkData == null) {
    return null;
  }
  ScanInfo current = store.getScanInfo();
  if (current.getTtl() == Long.MAX_VALUE) {
    // TTL disabled for this family — nothing to adjust.
    return null;
  }
  long elapsedSinceZkTimestamp = EnvironmentEdgeManager.currentTimeMillis() - Bytes.toLong(zkData);
  long effectiveTtl = Math.max(elapsedSinceZkTimestamp, current.getTtl());
  return new ScanInfo(store.getFamily(), effectiveTtl, current.getTimeToPurgeDeletes(),
      current.getComparator());
}
/**
 * Verifies that a user-priority compaction request on the region executes for CF1
 * but is cancelled for CF2, and that the tracker records each outcome exactly once.
 */
@Test
public void testRequestOnRegion() throws IOException, InterruptedException {
  Tracker tracker = new Tracker();
  TRACKER = tracker;
  region.requestCompaction("test", Store.PRIORITY_USER, false, tracker);
  tracker.await();

  // CF2's compaction should have been cancelled before execution.
  assertEquals(1, tracker.notExecutedStores.size());
  String cancelledFamily = tracker.notExecutedStores.get(0).getFirst().getColumnFamilyName();
  assertEquals(Bytes.toString(CF2), cancelledFamily);
  assertThat(tracker.notExecutedStores.get(0).getSecond(),
      containsString("compaction request was cancelled"));

  // CF1's compaction should have run: one before-execute and one after-execute record.
  assertEquals(1, tracker.beforeExecuteStores.size());
  assertEquals(Bytes.toString(CF1), tracker.beforeExecuteStores.get(0).getColumnFamilyName());
  assertEquals(1, tracker.afterExecuteStores.size());
  assertEquals(Bytes.toString(CF1), tracker.afterExecuteStores.get(0).getColumnFamilyName());
}
// NOTE(review): truncated/garbled fragment from region-replica store-file refresh logic —
// it refreshes the store's files, tracks the smallest max-sequence-id across stores, and
// aborts any pending prepared-flush context for the store (subtracting its snapshot size
// from the global memstore accounting). The `null : this.prepareFlushResult...` text is a
// dangling half of a ternary whose condition was lost in extraction; this span is not
// valid Java as-is and must be restored from the full source before any edit.
long maxSeqIdBefore = store.getMaxSequenceId(); store.refreshStoreFiles(); long storeSeqId = store.getMaxSequenceId(); if (storeSeqId < smallestSeqIdInStores) { smallestSeqIdInStores = storeSeqId; null : this.prepareFlushResult.storeFlushCtxs.get(store.getFamily().getName()); if (ctx != null) { long snapshotSize = store.getFlushableSize(); ctx.abort(); this.addAndGetGlobalMemstoreSize(-snapshotSize); this.prepareFlushResult.storeFlushCtxs.remove(store.getFamily().getName()); totalFreedSize += snapshotSize; mvcc.advanceTo(s.getMaxMemstoreTS());
// NOTE(review): fragment starting mid-expression (the call whose result feeds `earliest`
// is cut off) from flush-decision logic: it logs a flush either because the column
// family's oldest unflushed sequence id lags the MVCC read point by more than
// flushPerChanges, or because the oldest edit is older than flushCheckInterval. The
// enclosing if-blocks do not close within this view; comments only.
store.getFamily().getName()) - 1; if (earliest > 0 && earliest + flushPerChanges < mvcc.getReadPoint()) { if (LOG.isDebugEnabled()) { LOG.debug("Flush column family " + store.getColumnFamilyName() + " of " + getRegionInfo().getEncodedName() + " because unflushed sequenceid=" + earliest + " is > " + this.flushPerChanges + " from current=" + mvcc.getReadPoint()); if (store.timeOfOldestEdit() < now - this.flushCheckInterval) { if (LOG.isDebugEnabled()) { LOG.debug("Flush column family: " + store.getColumnFamilyName() + " of " + getRegionInfo().getEncodedName() + " because time of oldest edit=" + store.timeOfOldestEdit() + " is > " + this.flushCheckInterval + " from now =" + now);
@Override protected boolean shouldSplit() { if (region.shouldForceSplit()) return true; boolean foundABigStore = false; // Get count of regions that have the same common table as this.region int tableRegionsCount = getCountOfCommonTableRegions(); // Get size to check long sizeToCheck = getSizeToCheck(tableRegionsCount); for (Store store : region.getStores().values()) { // If any of the stores is unable to split (eg they contain reference files) // then don't split if ((!store.canSplit())) { return false; } // Mark if any store is big enough long size = store.getSize(); if (size > sizeToCheck) { LOG.debug("ShouldSplit because " + store.getColumnFamilyName() + " size=" + size + ", sizeToCheck=" + sizeToCheck + ", regionsWithCommonTable=" + tableRegionsCount); foundABigStore = true; break; } } return foundABigStore; }
/**
 * Wraps a compaction scanner in a {@code StatisticsScanner} so that statistics for
 * the compacted column family are collected and written as compaction proceeds.
 */
@Override
public InternalScanner createCompactionScanner(RegionCoprocessorEnvironment env, Store store,
    InternalScanner delegate) {
  byte[] familyName = store.getColumnFamilyDescriptor().getName();
  ImmutableBytesPtr cfKey = new ImmutableBytesPtr(familyName);
  LOG.info("StatisticsScanner created for table: " + tableName
      + " CF: " + store.getColumnFamilyName());
  return new StatisticsScanner(this, statsWriter, env, delegate, cfKey);
}
// Refresh the secondary replica's view of this family's store files, then verify it
// now sees all three files. NOTE(review): part of a test method whose declaration is
// outside this view; `f` and the expected count of 3 come from the surrounding test.
secondaryRegion.getStore(f).refreshStoreFiles(); Assert.assertEquals(3, secondaryRegion.getStore(f).getStorefilesCount());
/**
 * Decides whether the given store should be flushed.
 *
 * <p>A store whose memstore size exceeds the configured lower bound is always
 * flushed (with a debug log explaining why); otherwise the decision is delegated
 * to the region's own per-store flush policy.
 *
 * @param store the store under consideration
 * @return {@code true} if the store should be flushed
 */
private boolean shouldFlush(Store store) {
  if (store.getMemStoreSize() <= this.flushSizeLowerBound) {
    // Below the size threshold — defer to the region's policy.
    return region.shouldFlushStore(store);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Flush Column Family " + store.getColumnFamilyName() + " of "
        + region.getRegionInfo().getEncodedName() + " because memstoreSize="
        + store.getMemStoreSize() + " > lower bound=" + this.flushSizeLowerBound);
  }
  return true;
}
// NOTE(review): truncated/garbled fragment from WAL-replay store refresh — replaces the
// store's files with the list from the store descriptor, skips the store (continue) if a
// file has vanished, and when the max sequence id changed, aborts any pending prepared
// flush context and adjusts global memstore accounting. As in the similar fragment above,
// `null : this.prepareFlushResult...` is the orphaned half of a ternary and the braces do
// not balance; this span must be restored from the full source before editing.
long storeSeqId = store.getMaxSequenceId(); List<String> storeFiles = storeDescriptor.getStoreFileList(); try { store.refreshStoreFiles(storeFiles); // replace the files with the new ones } catch (FileNotFoundException ex) { LOG.warn(getRegionInfo().getEncodedName() + " : " continue; if (store.getMaxSequenceId() != storeSeqId) { null : this.prepareFlushResult.storeFlushCtxs.get(family); if (ctx != null) { long snapshotSize = store.getFlushableSize(); ctx.abort(); this.addAndGetGlobalMemstoreSize(-snapshotSize);
/**
 * Coprocessor hook that replaces the flush scanner with one filtered through
 * {@code PayloadDataFilter}.
 *
 * <p>A fresh {@link Scan} carrying the filter is built, then a {@code StoreScanner}
 * is opened over just the memstore scanner. NOTE(review): the scan type passed is
 * COMPACT_DROP_DELETES even though this is a flush hook — presumably intentional to
 * drop deletes at flush time; confirm against the coprocessor's design notes.
 */
@Override
public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  LOG.info("preFlush, filter using PayloadDataFilter");
  PayloadDataFilter payloadFilter = new PayloadDataFilter(c.getEnvironment(),
      System.currentTimeMillis(), prefixLength, topicMetadataCache);
  Scan filteredScan = new Scan();
  filteredScan.setFilter(payloadFilter);
  return new StoreScanner(store, store.getScanInfo(), filteredScan,
      Collections.singletonList(memstoreScanner), ScanType.COMPACT_DROP_DELETES,
      store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
/**
 * Coprocessor hook invoked before a compaction scan.
 *
 * <p>Wraps the compaction scanner in an {@code IncrementSummingScanner} configured
 * with this column family's compaction bound and oldest visible timestamp,
 * preserving the scan type requested by the compaction.
 */
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
    InternalScanner scanner, ScanType scanType, CompactionRequest request) throws IOException {
  byte[] familyName = store.getFamily().getName();
  return new IncrementSummingScanner(region, IncrementHandlerState.BATCH_UNLIMITED, scanner,
      scanType, state.getCompactionBound(familyName),
      state.getOldestVisibleTimestamp(familyName));
}
// Closes the enclosing class (its declaration is outside this view).
}
/**
 * Coprocessor RPC endpoint: re-reads the store file list for every store of this
 * region so that HFiles added out-of-band (e.g. bulk-loaded on another replica)
 * become visible.
 *
 * <p>Fixes over the original: the debug log line is now built only when debug
 * logging is enabled (the unconditional string concatenation was paid on every
 * call regardless of log level), and the message's missing separators around
 * "class:" are repaired.
 *
 * <p>On IOException the error is logged and attached to the controller; the
 * callback is always invoked with the default response so the client is not
 * left hanging.
 */
@Override
public void refreshHFiles(RpcController controller,
    RefreshHFilesProtos.RefreshHFilesRequest request,
    RpcCallback<RefreshHFilesProtos.RefreshHFilesResponse> done) {
  try {
    for (Store store : env.getRegion().getStores()) {
      if (LOG.isDebugEnabled()) {
        // Guard avoids concatenation cost when debug logging is off.
        LOG.debug("Refreshing HFiles for region: "
            + store.getRegionInfo().getRegionNameAsString()
            + " and store: " + store.getColumnFamilyName()
            + " class: " + store.getClass());
      }
      store.refreshStoreFiles();
    }
  } catch (IOException ioe) {
    LOG.error("Exception while trying to refresh store files: ", ioe);
    CoprocessorRpcUtils.setControllerException(controller, ioe);
  }
  // Always complete the RPC, even on failure, so the caller is not blocked.
  done.run(RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance());
}