/**
 * Returns the smallest read point of the region owning the given store,
 * by delegating to the store's enclosing HRegion.
 */
long getSmallestReadPoint(HStore store) {
  return store.getHRegion().getSmallestReadPoint();
}
// Closes the enclosing (not fully visible here) class.
}
// NOTE(review): fragment of a switch over the configured in-memory compaction
// policy; the switch header and the beginning of the `default:` arm (the
// expression that yields `clz`, presumably a conf.getClass(...) call) are not
// visible in this chunk — confirm against the full file.
// Non-compacting case: instantiate the plain DefaultMemStore via reflection.
ms = ReflectionUtils.newInstance(DefaultMemStore.class,
    new Object[] { conf, this.comparator, this.getHRegion().getRegionServicesForStores()});
break;
default:
  // TRUNCATED(review): these are the trailing arguments of a call that is cut
  // off above (default class + expected superclass for a conf.getClass lookup).
  CompactingMemStore.class, CompactingMemStore.class);
  // Instantiate the configured CompactingMemStore subclass, passing the
  // in-memory compaction policy through to its constructor.
  ms = ReflectionUtils.newInstance(clz,
      new Object[]{conf, this.comparator, this, this.getHRegion().getRegionServicesForStores(), inMemoryCompaction});
/**
 * Test for HBASE-3492 - Test split on empty colfam (no store files).
 *
 * <p>A store with no store files has no midkey, so no split point must ever be
 * reported — not even after a split has been explicitly forced on the region.
 *
 * @throws IOException When the IO operations fail.
 */
@Test
public void testSplitWithEmptyColFam() throws IOException {
  init(this.name.getMethodName());
  // No store files yet: no split point available.
  assertFalse(store.getSplitPoint().isPresent());
  // Force a split request on the owning region; the empty column family must
  // still report no split point rather than failing.
  store.getHRegion().forceSplit(null);
  assertFalse(store.getSplitPoint().isPresent());
  // Clean up the forced-split state on the region.
  store.getHRegion().clearSplit();
}
/**
 * Constructor for compaction scans: reads all cells in the store, applying the
 * compaction query matcher (which may drop deletes inside the given row range).
 *
 * @param store              the store being compacted
 * @param scanInfo           per-family scan configuration
 * @param scanners           scanners over the files/segments being compacted
 * @param scanType           type of compaction scan (never USER_SCAN)
 * @param smallestReadPoint  the readPoint that we should use for tracking versions
 * @param earliestPutTs      earliest put timestamp among the compacted files
 * @param dropDeletesFromRow inclusive row from which deletes may be dropped
 * @param dropDeletesToRow   exclusive row up to which deletes may be dropped
 * @throws IOException if seeking the scanners fails
 */
private StoreScanner(HStore store, ScanInfo scanInfo,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
    long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
  // Compactions read at the region's READ_COMMITTED read point.
  this(store, SCAN_FOR_COMPACTION, scanInfo, 0,
      store.getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED), false, scanType);
  assert scanType != ScanType.USER_SCAN;
  matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, smallestReadPoint,
      earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow,
      store.getCoprocessorHost());
  // Filter the list of scanners using Bloom filters, time range, TTL, etc.
  scanners = selectScannersFrom(store, scanners);
  // Seek all scanners to the initial key
  seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled);
  addCurrentScanners(scanners);
  // Combine all seeked scanners with a heap
  resetKVHeap(scanners, comparator);
}
/**----------------------------------------------------------------------
 * The request to dispatch the compaction asynchronous task.
 * The method returns true if compaction was successfully dispatched, or false if there
 * is already an ongoing compaction or no segments to compact.
 */
public boolean start() throws IOException {
  if (!compactingMemStore.hasImmutableSegments()) { // no compaction on empty pipeline
    return false;
  }
  // get a snapshot of the list of the segments from the pipeline,
  // this local copy of the list is marked with specific version
  versionedList = compactingMemStore.getImmutableSegments();
  LOG.trace("Speculative compaction starting on {}/{}",
      compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(),
      compactingMemStore.getStore().getColumnFamilyName());
  HStore store = compactingMemStore.getStore();
  RegionCoprocessorHost cpHost = store.getCoprocessorHost();
  // Fire the pre-compaction coprocessor hook before doing any work.
  if (cpHost != null) {
    cpHost.preMemStoreCompaction(store);
  }
  try {
    doCompaction();
  } finally {
    // The post hook runs even when doCompaction() throws, mirroring the pre hook.
    if (cpHost != null) {
      cpHost.postMemStoreCompaction(store);
    }
  }
  return true;
}
// NOTE(review): body of an anonymous PrivilegedExceptionAction whose enclosing
// call is outside this chunk; the trailing "});" closes that construct.
// Verifies that a flush failing with a fault-injected IOException leaves no
// store files behind.
@Override
public Object run() throws Exception {
  // Make sure it worked (above is sensitive to caching details in hadoop core)
  FileSystem fs = FileSystem.get(conf);
  assertEquals(FaultyFileSystem.class, fs.getClass());
  // Initialize region
  init(name.getMethodName(), conf);
  LOG.info("Adding some data");
  store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null);
  store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null);
  store.add(new KeyValue(row, family, qf3, 1, (byte[])null), null);
  LOG.info("Before flush, we should have no files");
  Collection<StoreFileInfo> files =
      store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
  assertEquals(0, files != null ? files.size() : 0);
  //flush
  try {
    LOG.info("Flushing");
    flush(1);
    // The faulty filesystem must make the flush fail loudly, not silently.
    fail("Didn't bubble up IOE!");
  } catch (IOException ioe) {
    assertTrue(ioe.getMessage().contains("Fault injected"));
  }
  LOG.info("After failed flush, we should still have no files!");
  files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
  assertEquals(0, files != null ? files.size() : 0);
  // Close the WAL explicitly so the faulty FS does not fail region teardown.
  store.getHRegion().getWAL().close();
  return null;
} });
// NOTE(review): fragment of a larger test method; surrounding setup is not
// visible in this chunk.
// Close and recreate the store with configuration `c`; the freshly opened
// HStore is expected to rediscover exactly two store files on disk.
this.store.close();
this.store = new HStore(this.store.getHRegion(), this.store.getColumnFamilyDescriptor(), c);
assertEquals(2, this.store.getStorefilesCount());
// NOTE(review): fragment — the closing brace of this if-block is outside this
// chunk.
// Enable parallel seeking only when both configured in the scan info and a
// region server is attached (presumably it provides the executor used for the
// parallel seeks — confirm against the full file).
RegionServerServices rsService = store.getHRegion().getRegionServerServices();
if (rsService != null && scanInfo.isParallelSeekEnabled()) {
  this.parallelSeekEnabled = true;
/**
 * Returns the smallest read point of the region owning the given store,
 * by delegating to the store's enclosing HRegion.
 */
long getSmallestReadPoint(HStore store) {
  return store.getHRegion().getSmallestReadPoint();
}
// Closes the enclosing (not fully visible here) class.
}
/**
 * Test for HBASE-3492 - Test split on empty colfam (no store files).
 *
 * <p>A store with no store files has no midkey, so no split point must ever be
 * reported — not even after a split has been explicitly forced on the region.
 *
 * @throws IOException When the IO operations fail.
 */
@Test
public void testSplitWithEmptyColFam() throws IOException {
  init(this.name.getMethodName());
  // No store files yet: no split point available.
  assertFalse(store.getSplitPoint().isPresent());
  // Force a split request on the owning region; the empty column family must
  // still report no split point rather than failing.
  store.getHRegion().forceSplit(null);
  assertFalse(store.getSplitPoint().isPresent());
  // Clean up the forced-split state on the region.
  store.getHRegion().clearSplit();
}
/**
 * Constructor for compaction scans (older Store-based API): reads all cells in
 * the store, choosing a query matcher based on whether a delete-dropping row
 * range was supplied.
 *
 * @param store              the store being compacted
 * @param scanInfo           per-family scan configuration
 * @param scan               the (compaction-internal) scan specification
 * @param scanners           scanners over the files/segments being compacted
 * @param scanType           type of compaction scan
 * @param smallestReadPoint  the readPoint that we should use for tracking versions
 * @param earliestPutTs      earliest put timestamp among the compacted files
 * @param dropDeletesFromRow inclusive row from which deletes may be dropped;
 *                           null means deletes are retained
 * @param dropDeletesToRow   exclusive row up to which deletes may be dropped
 * @throws IOException if seeking the scanners fails
 */
private StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
    long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
  // Compactions read at the region's READ_COMMITTED read point.
  // (getReadpoint — lowercase 'p' — is the historical HRegion API name.)
  this(store, scan, scanInfo, null,
      ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED), false);
  if (dropDeletesFromRow == null) {
    // Plain compaction matcher: deletes are kept.
    matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint,
        earliestPutTs, oldestUnexpiredTS, now, store.getCoprocessorHost());
  } else {
    // Delete-dropping matcher, limited to [dropDeletesFromRow, dropDeletesToRow).
    matcher = new ScanQueryMatcher(scan, scanInfo, null, smallestReadPoint, earliestPutTs,
        oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost());
  }
  // Filter the list of scanners using Bloom filters, time range, TTL, etc.
  scanners = selectScannersFrom(scanners);
  // Seek all scanners to the initial key
  seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled);
  // Combine all seeked scanners with a heap
  resetKVHeap(scanners, store.getComparator());
}
// NOTE(review): fragment — the closing brace of this if-block is outside this
// chunk. This is the older Store-based API variant, hence the HStore cast.
// Enable parallel seeking only when both configured in the scan info and a
// region server is attached (presumably it provides the executor used for the
// parallel seeks — confirm against the full file).
RegionServerServices rsService = ((HStore)store).getHRegion().getRegionServerServices();
if (rsService != null && scanInfo.isParallelSeekEnabled()) {
  this.parallelSeekEnabled = true;
// NOTE(review): body of an anonymous PrivilegedExceptionAction whose enclosing
// call is outside this chunk; the trailing "});" closes that construct.
// Verifies that a flush failing with a fault-injected IOException leaves no
// store files behind.
@Override
public Object run() throws Exception {
  // Make sure it worked (above is sensitive to caching details in hadoop core)
  FileSystem fs = FileSystem.get(conf);
  assertEquals(FaultyFileSystem.class, fs.getClass());
  // Initialize region
  init(name.getMethodName(), conf);
  LOG.info("Adding some data");
  store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null);
  store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null);
  store.add(new KeyValue(row, family, qf3, 1, (byte[])null), null);
  LOG.info("Before flush, we should have no files");
  Collection<StoreFileInfo> files =
      store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
  assertEquals(0, files != null ? files.size() : 0);
  //flush
  try {
    LOG.info("Flushing");
    flush(1);
    // The faulty filesystem must make the flush fail loudly, not silently.
    fail("Didn't bubble up IOE!");
  } catch (IOException ioe) {
    assertTrue(ioe.getMessage().contains("Fault injected"));
  }
  LOG.info("After failed flush, we should still have no files!");
  files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
  assertEquals(0, files != null ? files.size() : 0);
  // Close the WAL explicitly so the faulty FS does not fail region teardown.
  store.getHRegion().getWAL().close();
  return null;
} });
// NOTE(review): fragment of a larger test method; surrounding setup is not
// visible in this chunk.
// Close and recreate the store with configuration `c`; the freshly opened
// HStore is expected to rediscover exactly two store files on disk.
this.store.close();
this.store = new HStore(this.store.getHRegion(), this.store.getColumnFamilyDescriptor(), c);
assertEquals(2, this.store.getStorefilesCount());