// NOTE(review): this span appears to be extraction residue — non-contiguous
// statements from a store-compaction loop joined together (braces do not
// balance, and the first token is the tail of a string concatenation).
// Left byte-identical; recover the original source before editing this span.
" family=" + familyName); if (major) { store.triggerMajorCompaction(); store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null); if (!compaction.isPresent()) { break; store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null); if (storeFiles != null && !storeFiles.isEmpty()) { if (deleteCompacted) { } while (store.needsCompaction() && !compactOnce);
/**
 * Builds a "mock" {@link HStore}: the store directory to compact serves as the
 * data source, while all temp writes are redirected into the caller-supplied
 * {@code tempDir}.
 *
 * @param conf       active configuration
 * @param fs         file system holding the table data
 * @param tableDir   root directory of the table
 * @param htd        descriptor of the table being compacted
 * @param hri        region the store belongs to
 * @param familyName name of the column family to open
 * @param tempDir    directory to report from {@code getTempDir()}
 * @return an HStore backed by the on-disk region but writing temps to tempDir
 * @throws IOException if the region or store cannot be opened
 */
private static HStore getStore(final Configuration conf, final FileSystem fs,
    final Path tableDir, final TableDescriptor htd, final RegionInfo hri,
    final String familyName, final Path tempDir) throws IOException {
  // Anonymous subclass so every temp write lands in the user-chosen directory.
  HRegionFileSystem mockRegionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
    @Override
    public Path getTempDir() {
      return tempDir;
    }
  };
  HRegion mockRegion = new HRegion(mockRegionFs, null, conf, htd, null);
  return new HStore(mockRegion, htd.getColumnFamily(Bytes.toBytes(familyName)), conf);
}
}
/**
 * Creates a writer for a mob file located under a temp directory.
 *
 * @param conf The current configuration.
 * @param fs The current file system.
 * @param family The descriptor of the current column family.
 * @param mobFileName The mob file name.
 * @param basePath The basic path for a temp directory.
 * @param maxKeyCount The key count.
 * @param compression The compression algorithm.
 * @param cacheConfig The current cache config.
 * @param cryptoContext The encryption context.
 * @param isCompaction If the writer is used in compaction.
 * @return The writer for the mob file.
 * @throws IOException if the underlying writer cannot be created
 */
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
    ColumnFamilyDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
    Compression.Algorithm compression, CacheConfig cacheConfig,
    Encryption.Context cryptoContext, boolean isCompaction) throws IOException {
  // Resolve the concrete target path, then delegate to the fully-parameterized
  // overload; mob files never carry bloom filters, hence BloomType.NONE.
  Path mobFilePath = new Path(basePath, mobFileName.getFileName());
  return createWriter(conf, fs, family, mobFilePath, maxKeyCount, compression, cacheConfig,
      cryptoContext, HStore.getChecksumType(conf), HStore.getBytesPerChecksum(conf),
      family.getBlocksize(), BloomType.NONE, isCompaction);
}
HStoreFile moveFileIntoPlace(Path newFile) throws IOException { validateStoreFile(newFile); // Move the file into the right spot Path destPath = fs.commitStoreFile(getColumnFamilyName(), newFile); return createStoreFileAndReader(destPath); }
/**
 * Re-reads this store's directory from the file system and reconciles the
 * in-memory store file set with what is actually on disk.
 *
 * @throws IOException if listing the store directory fails
 */
@Override
public void refreshStoreFiles() throws IOException {
  refreshStoreFilesInternal(fs.getStoreFiles(getColumnFamilyName()));
}
/**
 * Opens every store file found in this store's directory, loading them in
 * parallel; the returned list carries no particular ordering.
 *
 * @return the opened store files, unsorted
 * @throws IOException if the directory cannot be listed or a file opened
 */
private List<HStoreFile> loadStoreFiles() throws IOException {
  Collection<StoreFileInfo> fileInfos = fs.getStoreFiles(getColumnFamilyName());
  return openStoreFiles(fileInfos);
}
// NOTE(review): fragment of the HStore constructor — its header and remainder
// lie outside this view; comments are added in place only, code unchanged.
LOG.trace("Time to purge deletes set to {}ms in store {}", timeToPurgeDeletes, this);
// TTL comes from the column family descriptor; unused within this visible
// span — presumably consumed further down the constructor. TODO confirm.
long ttl = determineTTLFromFamily(family);
// Build the memstore and the cache configuration for this store.
this.memstore = getMemstore();
createCacheConf(family);
// The store engine owns the compaction policy, compactor and file manager.
this.storeEngine = createStoreEngine(this, this.conf, this.comparator);
// Load the existing store files and fold their sizes into the store counters
// before handing them to the store file manager.
List<HStoreFile> hStoreFiles = loadStoreFiles();
this.storeSize.addAndGet(getStorefilesSize(hStoreFiles, sf -> true));
this.totalUncompressedBytes.addAndGet(getTotalUmcompressedBytes(hStoreFiles));
this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles);
// Checksum settings used when writing new store files.
this.checksumType = getChecksumType(conf);
this.bytesPerChecksum = getBytesPerChecksum(conf);
flushRetriesNumber = conf.getInt(
    "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER);
LOG.info("Store={}, memstore type={}, storagePolicy={}, verifyBulkLoads={}, "
    + "parallelPutCountPrintThreshold={}, encoding={}, compression={}",
    getColumnFamilyName(), memstore.getClass().getSimpleName(), policyName,
    verifyBulkLoads, parallelPutCountPrintThreshold, family.getDataBlockEncoding(),
    family.getCompressionType());
@Test public void testGetSplitPoint() throws IOException { ConstantSizeRegionSplitPolicy policy = (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf); // For no stores, should not split assertFalse(policy.shouldSplit()); assertNull(policy.getSplitPoint()); // Add a store above the requisite size. Should split. HStore mockStore = Mockito.mock(HStore.class); Mockito.doReturn(2000L).when(mockStore).getSize(); Mockito.doReturn(true).when(mockStore).canSplit(); Mockito.doReturn(Optional.of(Bytes.toBytes("store 1 split"))).when(mockStore).getSplitPoint(); stores.add(mockStore); assertEquals("store 1 split", Bytes.toString(policy.getSplitPoint())); // Add a bigger store. The split point should come from that one HStore mockStore2 = Mockito.mock(HStore.class); Mockito.doReturn(4000L).when(mockStore2).getSize(); Mockito.doReturn(true).when(mockStore2).canSplit(); Mockito.doReturn(Optional.of(Bytes.toBytes("store 2 split"))).when(mockStore2).getSplitPoint(); stores.add(mockStore2); assertEquals("store 2 split", Bytes.toString(policy.getSplitPoint())); }
// NOTE(review): fragment of a test method — the declarations of w, c, row,
// family and qf1/qf2 live outside this view; comments added in place only.
init(this.name.getMethodName());
// Write two cells into the memstore and flush them into a first store file.
this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null);
this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null);
flush(1);
// Inspect the flushed file to learn its directory and max sequence id.
HStoreFile f = this.store.getStorefiles().iterator().next();
Path storedir = f.getPath().getParent();
long seqid = f.getMaxSequenceId();
// 'w' is presumably a writer producing a second store file — TODO confirm
// against the enclosing test. Stamp it with a newer sequence id and close it.
w.appendMetadata(seqid + 1, false);
w.close();
// Reopen the store so it rescans the directory and picks up both files.
this.store.close();
this.store = new HStore(this.store.getHRegion(), this.store.getColumnFamilyDescriptor(), c);
assertEquals(2, this.store.getStorefilesCount());
// NOTE(review): fragment of a region-replica flush-replay test — the enclosing
// loop/method and several closing braces lie outside this view; comments are
// added in place only, token order unchanged.
// Snapshot sizes before replaying the flush marker.
long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
long regionMemstoreSize = secondaryRegion.getMemStoreDataSize();
MemStoreSize mss = store.getFlushableSize();
long storeSize = store.getSize();
long storeSizeUncompressed = store.getStoreSizeUncompressed();
if (flushDesc.getAction() == FlushAction.START_FLUSH) {
  LOG.info("-- Replaying flush start in secondary");
  // After replay the memstore should have shrunk ...
  long newStoreMemstoreSize = store.getMemStoreSize().getHeapSize();
  LOG.info("Memstore size reduced by:"
      + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize));
  assertEquals(expectedStoreFileCount, s.getStorefilesCount());
  MemStoreSize newMss = store.getFlushableSize();
  assertTrue(mss.getHeapSize() > newMss.getHeapSize());
  // ... and the on-disk store sizes should have grown accordingly.
  assertTrue(store.getSize() > storeSize);
  assertTrue(store.getStoreSizeUncompressed() > storeSizeUncompressed);
  assertEquals(store.getSize(), store.getStorefilesSize());
  if (store.getColumnFamilyName().equals("cf1")) {
    assertEquals(1, store.getStorefilesCount());
  } else {
    assertEquals(expectedStoreFileCount, store.getStorefilesCount());
    verifyData(primaryRegion, 0, lastReplayed, cq, families);
    for (HStore store : primaryRegion.getStores()) {
      if (store.getColumnFamilyName().equals("cf1")) {
// NOTE(review): fragment of a Phoenix local-index-aware split-point routine —
// the enclosing method and matching closing braces lie outside this view;
// comments are added in place only, token order unchanged.
boolean isLocalIndexKey = false;
for (HStore s : stores) {
  if (s.getColumnFamilyName()
      .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
    // Local-index family: check whether its split point repeats the previous
    // one — presumably detecting a repeated/stuck split. TODO confirm in the
    // full source.
    Optional<byte[]> splitPoint = s.getSplitPoint();
    if (oldSplitPoint != null && splitPoint.isPresent()
        && Bytes.compareTo(oldSplitPoint, splitPoint.get()) == 0) {
      if (!s.getColumnFamilyName()
          .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
        // Data family: remember the split point of the largest store so far.
        Optional<byte[]> splitPoint = s.getSplitPoint();
        long storeSize = s.getSize();
        if (splitPoint.isPresent() && largestStoreSize < storeSize) {
          splitPointFromLargestStore = splitPoint.get();
/**
 * Test for HBASE-5920 — verifies that a system-requested compaction is never
 * reported as a major compaction when the store holds more files than a single
 * compaction can cover, even after a major compaction was explicitly triggered.
 */
@Test
public void testNonUserMajorCompactionRequest() throws Exception {
  HStore store = r.getStore(COLUMN_FAMILY);
  // Lay down MAX_FILES_TO_COMPACT + 2 store files in total, one more than a
  // single compaction may select.
  createStoreFile(r);
  for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
    createStoreFile(r);
  }
  store.triggerMajorCompaction();

  CompactionRequestImpl request = store.requestCompaction().get().getRequest();
  assertNotNull("Expected to receive a compaction request", request);
  // Even though a major compaction was triggered, the request must be demoted
  // because not all store files fit into one compaction.
  assertEquals(
      "System-requested major compaction should not occur if there are too many store files",
      false, request.isMajor());
}
@Test public void testLowestModificationTime() throws Exception { Configuration conf = HBaseConfiguration.create(); FileSystem fs = FileSystem.get(conf); // Initialize region init(name.getMethodName(), conf); int storeFileNum = 4; for (int i = 1; i <= storeFileNum; i++) { LOG.info("Adding some data for the store file #"+i); this.store.add(new KeyValue(row, family, qf1, i, (byte[])null), null); this.store.add(new KeyValue(row, family, qf2, i, (byte[])null), null); this.store.add(new KeyValue(row, family, qf3, i, (byte[])null), null); flush(i); } // after flush; check the lowest time stamp long lowestTimeStampFromManager = StoreUtils.getLowestTimestamp(store.getStorefiles()); long lowestTimeStampFromFS = getLowestTimeStampFromFS(fs, store.getStorefiles()); assertEquals(lowestTimeStampFromManager,lowestTimeStampFromFS); // after compact; check the lowest time stamp store.compact(store.requestCompaction().get(), NoLimitThroughputController.INSTANCE, null); lowestTimeStampFromManager = StoreUtils.getLowestTimestamp(store.getStorefiles()); lowestTimeStampFromFS = getLowestTimeStampFromFS(fs, store.getStorefiles()); assertEquals(lowestTimeStampFromManager, lowestTimeStampFromFS); }
// NOTE(review): fragment of a block-cache/compaction test — the statement this
// line completes, and the rest of the method, are outside this view; comments
// added in place only.
TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
// Enable cache-on-write and evict-on-close on the store's cache config.
HStore store = region.getStores().iterator().next();
CacheConfig cacheConf = store.getCacheConfig();
cacheConf.setCacheDataOnWrite(true);
cacheConf.setEvictOnClose(true);
usedBlocksFound = false;
System.out.println("Compacting");
// Two files before the major compaction, exactly one afterwards.
assertEquals(2, store.getStorefilesCount());
store.triggerMajorCompaction();
region.compact(true);
waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
assertEquals(1, store.getStorefilesCount());
/**
 * Reports whether this store has accumulated more store files than the
 * configured blocking threshold.
 *
 * @return {@code true} when the current store file count exceeds
 *         {@code blockingFileCount}
 */
@Override
public boolean hasTooManyStoreFiles() {
  return this.blockingFileCount < getStorefilesCount();
}
// NOTE(review): fragment of an (older) HStore constructor — this span begins
// mid-expression (tail of a log-message concatenation) and its header lies
// outside this view; comments added in place only.
"ms in store " + this);
// TTL from the family descriptor; unused in the visible span — presumably
// consumed further down the constructor. TODO confirm.
long ttl = determineTTLFromFamily(family);
// Register the existing on-disk store files with the store file manager.
this.storeEngine.getStoreFileManager().loadFiles(loadStoreFiles());
// Checksum settings used when writing new store files.
this.checksumType = getChecksumType(conf);
this.bytesPerChecksum = getBytesPerChecksum(conf);
flushRetriesNumber = conf.getInt(
    "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER);
/**
 * Creates a flusher for a mob-enabled store, resolving the mob family
 * directory and making sure it exists on the file system.
 *
 * @param conf  active configuration
 * @param store the store being flushed; must be an {@code HMobStore}
 * @throws IllegalArgumentException if {@code store} is not an HMobStore
 * @throws IOException if the mob family directory cannot be created
 */
public DefaultMobStoreFlusher(Configuration conf, HStore store) throws IOException {
  super(conf, store);
  if (!(store instanceof HMobStore)) {
    throw new IllegalArgumentException("The store " + store + " is not a HMobStore");
  }
  mobCellValueSizeThreshold = store.getColumnFamilyDescriptor().getMobThreshold();
  this.targetPath = MobUtils.getMobFamilyPath(conf, store.getTableName(),
      store.getColumnFamilyName());
  // Fix: the original silently ignored the mkdirs() return value. Fail fast
  // here instead of surfacing a confusing error on the first flush attempt.
  if (!this.store.getFileSystem().exists(targetPath)
      && !this.store.getFileSystem().mkdirs(targetPath)) {
    throw new IOException("Failed to create mob family directory " + targetPath);
  }
  this.mobStore = (HMobStore) store;
}
// NOTE(review): fragment of a compaction test — the enclosing method and the
// declarations of store/admin/t are outside this view; comments in place only.
insertData(tableName, admin, t);
int fileNum = store.getStorefiles().size();
// Trigger a user-level major compaction and confirm a request is produced.
store.triggerMajorCompaction();
Optional<CompactionContext> cc = store.requestCompaction();
assertTrue(cc.isPresent());
// Expects fewer visible store files afterwards — presumably the compaction
// executes between these lines in the full test. TODO confirm ordering.
assertTrue(fileNum > store.getStorefiles().size());
/**
 * Commits an already-written HFile into this store and registers it with the
 * active store file set. The coprocessor post-commit hook fires even when the
 * commit itself throws (try/finally), preserving the original ordering.
 *
 * @param family column family the file belongs to
 * @param srcPathStr source location of the HFile being bulk loaded
 * @param dstPath destination path inside the store directory
 * @return the destination path the file now lives at
 * @throws IOException if the commit or reader creation fails
 */
public Path bulkLoadHFile(byte[] family, String srcPathStr, Path dstPath) throws IOException {
  Path srcPath = new Path(srcPathStr);
  try {
    fs.commitStoreFile(srcPath, dstPath);
  } finally {
    // Notify coprocessors regardless of the commit outcome.
    if (this.getCoprocessorHost() != null) {
      this.getCoprocessorHost().postCommitStoreFile(family, srcPath, dstPath);
    }
  }

  LOG.info("Loaded HFile " + srcPath + " into store '" + getColumnFamilyName() + "' as "
      + dstPath + " - updating store file list.");

  // Open a reader on the committed file and splice it into the live file set.
  HStoreFile bulkLoadedFile = createStoreFileAndReader(dstPath);
  bulkLoadHFile(bulkLoadedFile);

  LOG.info("Successfully loaded store file {} into store {} (new location: {})", srcPath,
      this, dstPath);
  return dstPath;
}
// NOTE(review): fragment — begins mid-expression (the receiver of .getRegion
// sits on a prior line outside this view); comments added in place only.
.getRegion(regionName);
HStore store = region.getStores().iterator().next();
// Cache data blocks on write and evict them when readers close.
CacheConfig cacheConf = store.getCacheConfig();
cacheConf.setCacheDataOnWrite(true);
cacheConf.setEvictOnClose(true);
// Two files before the major compaction, exactly one afterwards.
assertEquals(2, store.getStorefilesCount());
store.triggerMajorCompaction();
region.compact(true);
// Archive the compacted-away files so their cached blocks get evicted.
store.closeAndArchiveCompactedFiles();
waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
assertEquals(1, store.getStorefilesCount());
expectedBlockCount -= 2; // evicted two blocks, cached none
assertEquals(expectedBlockCount, cache.getBlockCount());