/**
 * Returns the {@link RegionCoprocessorHost} of the wrapped region. May be {@code null}
 * when no coprocessor host has been installed on the region.
 */
public RegionCoprocessorHost getCoprocessorHost() {
  return this.region.getCoprocessorHost();
}
/**
 * Runs the coprocessor {@code postScannerNext} hook, if any coprocessors are installed.
 * @param s the scanner that produced {@code results}
 * @param results the rows returned by the scanner
 * @param limit the batch limit that was in effect
 * @param hasMore whether the scanner reported more rows available
 * @return the (possibly overridden) hasMore value from the coprocessors, or {@code false}
 *         when no coprocessor host is present
 * @throws IOException if a coprocessor throws
 */
boolean postScannerNext(final InternalScanner s, final List<Result> results, final int limit,
    boolean hasMore) throws IOException {
  RegionCoprocessorHost host = region.getCoprocessorHost();
  if (host == null) {
    return false;
  }
  return host.postScannerNext(s, results, limit, hasMore);
}
}
/**
 * Runs the coprocessor {@code preScannerNext} hook, if any coprocessors are installed.
 * @param s the scanner about to be advanced
 * @param results the list that will receive rows
 * @param limit the batch limit in effect
 * @return {@code true} if a coprocessor asked to bypass the default next() behaviour,
 *         {@code false} otherwise (including when no coprocessor host is present)
 * @throws IOException if a coprocessor throws
 */
boolean preScannerNext(final InternalScanner s, final List<Result> results, final int limit)
    throws IOException {
  RegionCoprocessorHost host = region.getCoprocessorHost();
  if (host == null) {
    return false;
  }
  // Hook returns a tri-state Boolean; null means "no opinion", which we treat as no bypass.
  Boolean bypass = host.preScannerNext(s, results, limit);
  return Boolean.TRUE.equals(bypass);
}
/**
 * Opens a region scanner for {@code scan}, wrapping the open with the pre/post
 * coprocessor scanner-open hooks when a coprocessor host is installed.
 * @param scan the scan specification
 * @return the opened scanner, never {@code null}
 * @throws IOException if the scanner cannot be opened, or if a coprocessor (or hook
 *         result) leaves us without a scanner
 */
RegionScanner checkScannerOpen(final Scan scan) throws IOException {
  final RegionCoprocessorHost host = region.getCoprocessorHost();
  RegionScanner scanner;
  if (host == null) {
    scanner = region.getScanner(scan);
  } else {
    host.preScannerOpen(scan);
    // postScannerOpen may wrap or replace the scanner it is handed.
    scanner = host.postScannerOpen(scan, region.getScanner(scan));
  }
  if (scanner == null) {
    throw new IOException("Failed to open region scanner");
  }
  return scanner;
}
/**
 * Closes the given scanner, invoking the pre/post scanner-close coprocessor hooks
 * around the close when a coprocessor host is installed. A {@code null} scanner is
 * a no-op.
 * @param s the scanner to close, may be {@code null}
 * @throws IOException if the close or a coprocessor hook fails
 */
void checkScannerClose(final InternalScanner s) throws IOException {
  if (s == null) {
    return;
  }
  RegionCoprocessorHost host = region.getCoprocessorHost();
  if (host == null) {
    s.close();
    return;
  }
  host.preScannerClose(s);
  try {
    s.close();
  } finally {
    // postScannerClose must run even if close() throws.
    host.postScannerClose(s);
  }
}
/**
 * Closes an open region scanner on behalf of an RPC client, honoring the coprocessor
 * close hooks and the server's scanner bookkeeping maps.
 * <p>
 * Close may be deferred: when an RPC {@code context} is supplied, the actual close is
 * delegated to the holder's close callback rather than performed inline.
 * @param region the region owning the scanner
 * @param scanner the scanner being closed
 * @param scannerName the server-side registry key for the scanner
 * @param context the RPC call context, or {@code null} for an inline close
 * @throws IOException if a coprocessor hook fails
 */
private void closeScanner(HRegion region, RegionScanner scanner, String scannerName,
    RpcCallContext context) throws IOException {
  if (region.getCoprocessorHost() != null) {
    if (region.getCoprocessorHost().preScannerClose(scanner)) {
      // bypass the actual close.
      return;
    }
  }
  // Remove first so concurrent callers cannot double-close via the registry.
  RegionScannerHolder rsh = scanners.remove(scannerName);
  if (rsh != null) {
    if (context != null) {
      // Defer the real close to the RPC layer's callback (runs after the response).
      context.setCallBack(rsh.closeCallBack);
    } else {
      rsh.s.close();
    }
    if (region.getCoprocessorHost() != null) {
      region.getCoprocessorHost().postScannerClose(scanner);
    }
    // Remember the name so a late client call can be told the scanner was closed,
    // not that it never existed.
    closedScanners.put(scannerName, scannerName);
  }
}
@Override protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws IOException { assert super.joinedContinuationRow == null : "Trying to go to next row during joinedHeap read."; this.storeHeap.seekToPreviousRow(PrivateCellUtil.createFirstOnRow(curRowCell)); resetFilters(); // Calling the hook in CP which allows it to do a fast forward if (this.region.getCoprocessorHost() != null) { return this.region.getCoprocessorHost().postScannerFilterRow(this, curRowCell); } return true; }
protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws IOException { assert this.joinedContinuationRow == null: "Trying to go to next row during joinedHeap read."; Cell next; while ((next = this.storeHeap.peek()) != null && CellUtil.matchingRows(next, curRowCell)) { this.storeHeap.next(MOCKED_LIST); } resetFilters(); // Calling the hook in CP which allows it to do a fast forward return this.region.getCoprocessorHost() == null || this.region.getCoprocessorHost() .postScannerFilterRow(this, curRowCell); }
/**
 * Collects the names of every coprocessor loaded on this region server: the common
 * WAL's, each online region's, each region's WAL's, and the region-server host's own.
 * WAL lookup failures are logged and skipped rather than failing the whole report.
 * @return a sorted, de-duplicated array of coprocessor simple names
 */
public String[] getRegionServerCoprocessors() {
  TreeSet<String> coprocessors = new TreeSet<>();
  try {
    coprocessors.addAll(getWAL(null).getCoprocessorHost().getCoprocessors());
  } catch (IOException exception) {
    LOG.warn("Exception attempting to fetch wal coprocessor information for the common wal; "
        + "skipping.");
    LOG.debug("Exception details for failure to fetch wal coprocessor information.", exception);
  }
  Collection<HRegion> regions = getOnlineRegionsLocalContext();
  for (HRegion region : regions) {
    // Guard against a region whose coprocessor host is not (yet) installed; every other
    // caller in this file null-checks getCoprocessorHost() before use.
    RegionCoprocessorHost regionHost = region.getCoprocessorHost();
    if (regionHost != null) {
      coprocessors.addAll(regionHost.getCoprocessors());
    }
    try {
      coprocessors.addAll(getWAL(region.getRegionInfo()).getCoprocessorHost().getCoprocessors());
    } catch (IOException exception) {
      LOG.warn("Exception attempting to fetch wal coprocessor information for region " + region
          + "; skipping.");
      LOG.debug("Exception details for failure to fetch wal coprocessor information.", exception);
    }
  }
  coprocessors.addAll(rsHost.getCoprocessors());
  return coprocessors.toArray(new String[coprocessors.size()]);
}
/**
 * Runs the {@code prePrepareBulkLoad} coprocessor hook for the active user and creates
 * a per-user staging directory for the bulk load.
 * @param region the target region (must have a coprocessor host installed)
 * @param request the prepare request (currently unused beyond the RPC contract)
 * @return the staging-directory path to hand back to the client as the bulk token
 * @throws IOException if the hook or staging-directory creation fails
 */
public String prepareBulkLoad(final HRegion region, final PrepareBulkLoadRequest request)
    throws IOException {
  User user = getActiveUser();
  region.getCoprocessorHost().prePrepareBulkLoad(user);
  return createStagingDir(baseStagingDir, user, region.getTableDescriptor().getTableName())
      .toString();
}
/**
 * Looks up the coprocessor of type {@code clz} on the region serving {@code tableName}
 * and asserts it is of the expected type.
 * @return the coprocessor instance cast to {@code clz}
 */
private static <T extends RegionObserver> T find(final TableName tableName, Class<T> clz)
    throws IOException, InterruptedException {
  HRegion region = find(tableName);
  Coprocessor cp = region.getCoprocessorHost().findCoprocessor(clz.getName());
  // Build the failure message null-safely: the original eagerly called cp.getClass(),
  // which threw NPE instead of a useful assertion failure when the lookup returned null.
  assertTrue("The cp instance should be " + clz.getName() + ", current instance is "
      + (cp == null ? "null" : cp.getClass().getName()), clz.isInstance(cp));
  return clz.cast(cp);
}
/**
 * Deletes a bulk-load staging directory after running the {@code preCleanupBulkLoad}
 * coprocessor hook, then releases the active user's cached FileSystem instances if no
 * other in-flight bulk load still references that UGI.
 * @param region the region whose coprocessor host is consulted
 * @param request carries the bulk token (the staging path to remove)
 * @throws IOException if the hook fails or the staging path cannot be deleted
 */
public void cleanupBulkLoad(final HRegion region, final CleanupBulkLoadRequest request)
    throws IOException {
  try {
    region.getCoprocessorHost().preCleanupBulkLoad(getActiveUser());
    Path path = new Path(request.getBulkToken());
    if (!fs.delete(path, true)) {
      // delete() returning false is only an error if the path still exists.
      if (fs.exists(path)) {
        throw new IOException("Failed to clean up " + path);
      }
    }
    LOG.info("Cleaned up " + path + " successfully.");
  } finally {
    // Runs even when cleanup fails: drop cached FileSystems for this UGI unless it is
    // the login user or still referenced by another operation.
    UserGroupInformation ugi = getActiveUser().getUGI();
    try {
      if (!UserGroupInformation.getLoginUser().equals(ugi) && !isUserReferenced(ugi)) {
        FileSystem.closeAllForUGI(ugi);
      }
    } catch (IOException e) {
      LOG.error("Failed to close FileSystem for: " + ugi, e);
    }
  }
}
/**
 * Builds an {@link HStoreFile} for the given file info — wired to this region's
 * coprocessor host — and eagerly initializes its reader.
 * @param info descriptor of the store file on disk
 * @return the store file with an initialized reader
 * @throws IOException if the reader cannot be opened
 */
private HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException {
  info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
  HStoreFile storeFile = new HStoreFile(this.getFileSystem(), info, this.conf,
      this.cacheConf, this.family.getBloomFilterType(), isPrimaryReplicaStore());
  storeFile.initReader();
  return storeFile;
}
/**
 * Gathers the {@link AccessController} coprocessor instance from every online region
 * of every live region server in the mini cluster.
 * @param cluster the running mini cluster
 * @return all AccessController instances found (possibly empty)
 */
private static List<AccessController> getAccessControllers(MiniHBaseCluster cluster) {
  List<AccessController> controllers = Lists.newArrayList();
  for (RegionServerThread serverThread : cluster.getLiveRegionServerThreads()) {
    for (HRegion region : serverThread.getRegionServer().getOnlineRegionsLocalContext()) {
      Coprocessor cp = region.getCoprocessorHost().findCoprocessor(AccessController.class);
      if (cp != null) {
        controllers.add((AccessController) cp);
      }
    }
  }
  return controllers;
}
/**
 * Verifies that enabling bulk-load replication causes the ReplicationObserver
 * coprocessor to be auto-registered on a freshly opened region.
 */
@Test
public void testBulkLoadReplicationEnabled() throws IOException {
  TEST_UTIL.getConfiguration().setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
  final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
  final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));

  // Minimal table with a single family, opened on the mocked region server.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  htd.addFamily(new HColumnDescriptor(fam1));
  HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY,
      HConstants.EMPTY_BYTE_ARRAY);
  region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss,
      null);

  // The flag must have propagated into the region's configuration...
  assertTrue(region.conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, false));
  // ...and the observer class must appear in the configured coprocessor plugin list...
  String plugins = region.conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
  String replicationCoprocessorClass = ReplicationObserver.class.getCanonicalName();
  assertTrue(plugins.contains(replicationCoprocessorClass));
  // ...and actually be loaded on the region's coprocessor host.
  assertTrue(region.getCoprocessorHost().getCoprocessors()
      .contains(ReplicationObserver.class.getSimpleName()));
}
@Test public void testRegionObserverScanTimeStacking() throws Exception { byte[] ROW = Bytes.toBytes("testRow"); byte[] TABLE = Bytes.toBytes(getClass().getName()); byte[] A = Bytes.toBytes("A"); byte[][] FAMILIES = new byte[][] { A }; // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking Configuration conf = new HBaseTestingUtility().getConfiguration(); HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES); RegionCoprocessorHost h = region.getCoprocessorHost(); h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf); h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf); Put put = new Put(ROW); put.addColumn(A, A, A); region.put(put); Get get = new Get(ROW); Result r = region.get(get); assertNull( "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: " + r, r.listCells()); HBaseTestingUtility.closeRegionAndWAL(region); }
private static void testDataInMemoryWithoutWAL(HRegion region, Put originalPut, final Put addPut, long delta) throws IOException { final long initSize = region.getDataInMemoryWithoutWAL(); // save normalCPHost and replaced by mockedCPHost RegionCoprocessorHost normalCPHost = region.getCoprocessorHost(); RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class); // Because the preBatchMutate returns void, we can't do usual Mockito when...then form. Must // do below format (from Mockito doc). Mockito.doAnswer(new Answer<Void>() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { MiniBatchOperationInProgress<Mutation> mb = invocation.getArgument(0); mb.addOperationsFromCP(0, new Mutation[]{addPut}); return null; } }).when(mockedCPHost).preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class)); region.setCoprocessorHost(mockedCPHost); region.put(originalPut); region.setCoprocessorHost(normalCPHost); final long finalSize = region.getDataInMemoryWithoutWAL(); assertEquals("finalSize:" + finalSize + ", initSize:" + initSize + ", delta:" + delta,finalSize, initSize + delta); }
/**
 * Assert that when a Coprocessor is annotated with CoreCoprocessor, then it is possible to
 * access a RegionServerServices instance. Assert the opposite too.
 * Do it to RegionCoprocessors.
 * @throws IOException if coprocessor loading fails
 */
@Test
public void testCoreRegionCoprocessor() throws IOException {
  RegionCoprocessorHost host = region.getCoprocessorHost();
  // An unannotated coprocessor must NOT see RegionServerServices...
  RegionCoprocessorEnvironment env =
      host.load(null, NotCoreRegionCoprocessor.class.getName(), 0, HTU.getConfiguration());
  assertFalse(env instanceof HasRegionServerServices);
  // ...while a @CoreCoprocessor-annotated one must.
  env = host.load(null, CoreRegionCoprocessor.class.getName(), 1, HTU.getConfiguration());
  assertTrue(env instanceof HasRegionServerServices);
  assertEquals(this.rss, ((HasRegionServerServices) env).getRegionServerServices());
}
}
@Test // HBASE-3516: Test CP Class loading from local file system public void testClassLoadingFromLocalFS() throws Exception { File jarFile = buildCoprocessorJar(cpName3); // create a table that references the jar HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(cpName3)); htd.addFamily(new HColumnDescriptor("test")); htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" + Coprocessor.PRIORITY_USER); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(htd); waitForTable(htd.getTableName()); // verify that the coprocessor was loaded boolean found = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) { found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null); } } assertTrue("Class " + cpName3 + " was missing on a region", found); }
// Anonymous PrivilegedExceptionAction (enclosing expression is outside this chunk):
// splits the WAL, reopens the region over the recovered edits with a fresh WAL, and
// verifies the SampleRegionWALCoprocessor observed the pre/post WAL-restore hooks.
@Override public Void run() throws Exception {
  Path p = runWALSplit(newConf);
  LOG.info("WALSplit path == " + p);
  // Make a new wal for new region open.
  final WALFactory wals2 = new WALFactory(conf,
      ServerName.valueOf(currentTest.getMethodName() + "2", 16010, System.currentTimeMillis())
          .toString());
  WAL wal2 = wals2.getWAL(null);
  // Replaying the split edits during open should fire the WAL-restore coprocessor hooks.
  HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri,
      htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null);
  SampleRegionWALCoprocessor cp2 =
      region.getCoprocessorHost().findCoprocessor(SampleRegionWALCoprocessor.class);
  // TODO: asserting here is problematic.
  assertNotNull(cp2);
  assertTrue(cp2.isPreWALRestoreCalled());
  assertTrue(cp2.isPostWALRestoreCalled());
  region.close();
  wals2.close();
  return null;
} });