@Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() != null && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() .equals(TableName.META_TABLE_NAME)) { regionCoprocessorEnv = (RegionCoprocessorEnvironment) env; observer = new ExampleRegionObserverMeta(); requestsMap = new ConcurrentHashMap<>(); clientMetricsLossyCounting = new LossyCounting(); // only be active mode when this region holds meta table. active = true; } else { observer = new ExampleRegionObserverMeta(); } }
/** Returns whether the region addressed by the given observer context belongs to hbase:meta. */
private boolean isMetaTableOp(ObserverContext<RegionCoprocessorEnvironment> e) {
  TableName table = e.getEnvironment().getRegionInfo().getTable();
  return TableName.META_TABLE_NAME.equals(table);
}
@Override @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", justification="NPE should never happen; if it does it is a bigger issue") public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx, final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException { RegionCoprocessorEnvironment env = ctx.getEnvironment(); Configuration c = env.getConfiguration(); if (pairs == null || pairs.isEmpty() || !c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) { LOG.debug("Skipping recording bulk load entries in preCommitStoreFile for bulkloaded " + "data replication."); return; } // This is completely cheating AND getting a HRegionServer from a RegionServerEnvironment is // just going to break. This is all private. Not allowed. Regions shouldn't assume they are // hosted in a RegionServer. TODO: fix. RegionServerServices rss = ((HasRegionServerServices)env).getRegionServerServices(); Replication rep = (Replication)((HRegionServer)rss).getReplicationSourceService(); rep.addHFileRefsToQueue(env.getRegionInfo().getTable(), family, pairs); } }
/**
 * Test hook: while the FAIL switch is on, every put against a meta region is rejected
 * with an injected IOException.
 */
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
    Durability durability) throws IOException {
  if (FAIL) {
    // Only meta-region writes are failed; user-table writes pass through.
    if (c.getEnvironment().getRegionInfo().isMetaRegion()) {
      throw new IOException("Inject error");
    }
  }
}
}
/**
 * Test hook: while the FAIL switch is on, every get against a meta region is rejected
 * with an injected IOException.
 */
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
    List<Cell> result) throws IOException {
  if (FAIL) {
    // Only meta-region reads are failed; user-table reads pass through.
    if (c.getEnvironment().getRegionInfo().isMetaRegion()) {
      throw new IOException("Inject error");
    }
  }
}
/**
 * Bookkeeping hook: when a scan batch against a meta region completes, decrement the
 * concurrent-meta-scan counter that the matching preScannerNext incremented.
 */
@Override
public boolean postScannerNext(ObserverContext<RegionCoprocessorEnvironment> c,
    InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
  boolean isMeta = c.getEnvironment().getRegionInfo().isMetaRegion();
  if (isMeta) {
    CONCURRENCY.decrementAndGet();
  }
  return hasNext;
}
}
String tableName = ((RegionCoprocessorEnvironment)env).getRegionInfo().getTable().getNameAsString(); LOG.error("Removing coprocessor '" + env.toString() + "' from table '"+ tableName + "'", e); } else {
/**
 * Fault injector: fails every other scanner-next call on non-system tables with an
 * injected IOException; system-table scans are never touched.
 */
@Override
public boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> e,
    final InternalScanner s, final List<Result> results, final int limit,
    final boolean hasMore) throws IOException {
  final TableName tableName = e.getEnvironment().getRegionInfo().getTable();
  // NOTE: faults is only incremented for user tables, matching the original short-circuit.
  if (!tableName.isSystemTable()) {
    if (faults++ % 2 == 0) {
      LOG.debug(" Injecting fault in table=" + tableName + " scanner");
      throw new IOException("injected fault");
    }
  }
  return hasMore;
}
}
/**
 * Test hook: when delayFlush is set, slows down flushes of every region except the first
 * one of the table (the region whose start key is the empty start row) by 100 ms.
 *
 * Fix: the original caught InterruptedException without restoring the thread's interrupt
 * status; it is now re-set before the InterruptedIOException is thrown so callers further
 * up the stack can still observe the interruption.
 */
@Override
public void preFlush(ObserverContext<RegionCoprocessorEnvironment> e,
    FlushLifeCycleTracker tracker) throws IOException {
  if (delayFlush) {
    try {
      // Empty start row identifies the table's first region — never delayed.
      if (Bytes.compareTo(e.getEnvironment().getRegionInfo().getStartKey(),
          HConstants.EMPTY_START_ROW) != 0) {
        Thread.sleep(100);
      }
    } catch (InterruptedException e1) {
      // Restore the interrupt status before translating to an IO-flavored exception.
      Thread.currentThread().interrupt();
      throw new InterruptedIOException(e1.getMessage());
    }
  }
}
}
/**
 * Backup hook: before bulk-loaded store files are committed, records the file pairs in the
 * backup system table — but only for tables that have already gone through a FULL backup.
 * No-op when there is nothing to record or backup is disabled.
 */
@Override
public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
    final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
  Configuration cfg = ctx.getEnvironment().getConfiguration();
  if (pairs == null || pairs.isEmpty() || !BackupManager.isBackupEnabled(cfg)) {
    LOG.debug("skipping recording bulk load in preCommitStoreFile since backup is disabled");
    return;
  }
  try (Connection conn = ConnectionFactory.createConnection(cfg);
      BackupSystemTable sysTable = new BackupSystemTable(conn)) {
    List<TableName> fullBackupTables = sysTable.getTablesForBackupType(BackupType.FULL);
    RegionInfo info = ctx.getEnvironment().getRegionInfo();
    TableName tableName = info.getTable();
    if (!fullBackupTables.contains(tableName)) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(tableName + " has not gone thru full backup");
      }
      return;
    }
    sysTable.writeFilesForBulkLoadPreCommit(tableName, info.getEncodedNameAsBytes(), family,
        pairs);
  }
}
}
/**
 * Backup hook: after a bulk load completes, records the final HFile paths in the backup
 * system table — but only for tables that have already gone through a FULL backup.
 * Failures to record are logged, not propagated (best-effort bookkeeping).
 */
@Override
public void postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
    List<Pair<byte[], String>> stagingFamilyPaths, Map<byte[], List<Path>> finalPaths)
    throws IOException {
  Configuration cfg = ctx.getEnvironment().getConfiguration();
  if (finalPaths == null) {
    // there is no need to record state
    return;
  }
  if (!BackupManager.isBackupEnabled(cfg)) {
    LOG.debug("skipping recording bulk load in postBulkLoadHFile since backup is disabled");
    return;
  }
  try (Connection conn = ConnectionFactory.createConnection(cfg);
      BackupSystemTable sysTable = new BackupSystemTable(conn)) {
    List<TableName> fullBackupTables = sysTable.getTablesForBackupType(BackupType.FULL);
    RegionInfo info = ctx.getEnvironment().getRegionInfo();
    TableName tableName = info.getTable();
    if (!fullBackupTables.contains(tableName)) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(tableName + " has not gone thru full backup");
      }
      return;
    }
    sysTable.writePathsPostBulkLoad(tableName, info.getEncodedNameAsBytes(), finalPaths);
  } catch (IOException ioe) {
    // Best-effort: swallow and log so the bulk load itself is not failed by bookkeeping.
    LOG.error("Failed to get tables which have been fully backed up", ioe);
  }
}
@Override
/**
 * Test hook: counts accesses per replica id for the table under test and, when
 * FAIL_PRIMARY_GET is set, fails requests served by the primary replica.
 *
 * Fix: the primary check compared {@code region.getRegionId()} — a long region creation
 * timestamp — against {@code RegionReplicaUtil.DEFAULT_REPLICA_ID} (the replica id 0),
 * which would essentially never match, so the injected failure never fired. The intent
 * ("fail primary get") requires comparing the replica id.
 */
private void recordAndTryFail(ObserverContext<RegionCoprocessorEnvironment> c)
    throws IOException {
  RegionInfo region = c.getEnvironment().getRegionInfo();
  if (!region.getTable().equals(TABLE_NAME)) {
    return;
  }
  // Tally one access for this replica.
  REPLICA_ID_TO_COUNT.computeIfAbsent(region.getReplicaId(), k -> new AtomicInteger())
      .incrementAndGet();
  if (region.getReplicaId() == RegionReplicaUtil.DEFAULT_REPLICA_ID && FAIL_PRIMARY_GET) {
    throw new IOException("Inject error");
  }
}
/**
 * Concurrency probe: on meta-region scans, bumps the live-scan counter, publishes a new
 * high-water mark into MAX_CONCURRENCY if this scan exceeds it, then sleeps briefly so
 * overlapping scans can be observed. The matching postScannerNext decrements the counter.
 */
@Override
public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> c,
    InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
  if (c.getEnvironment().getRegionInfo().isMetaRegion()) {
    final int current = CONCURRENCY.incrementAndGet();
    // CAS loop: raise the recorded maximum while our level is above it; stop as soon as
    // either the recorded maximum already covers us or our CAS lands.
    int observedMax = MAX_CONCURRENCY.get();
    while (current > observedMax && !MAX_CONCURRENCY.compareAndSet(observedMax, current)) {
      observedMax = MAX_CONCURRENCY.get();
    }
    // Hold the scan open a little so concurrent scans overlap measurably.
    Threads.sleepWithoutInterrupt(10);
  }
  return hasNext;
}
/**
 * Fault injector: fails gets served by the table's last region (identified by its empty
 * end key) with a non-retriable region exception.
 */
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
    List<Cell> results) throws IOException {
  byte[] endKey = e.getEnvironment().getRegionInfo().getEndKey();
  // An empty end key marks the last region of the table.
  if (endKey.length == 0) {
    throw new DoNotRetryRegionException("Inject Error");
  }
}
}
/**
 * Translates a SecureBulkLoadHFilesRequest into the equivalent BulkLoadHFileRequest
 * targeting this coprocessor's region, carrying over the fs/bulk tokens, the
 * assign-seq-num flag and the family paths.
 *
 * NOTE(review): name should be lowerCamelCase (convertSecureBulkLoadHFilesRequest);
 * left unchanged so callers elsewhere in this file keep compiling.
 */
private BulkLoadHFileRequest ConvertSecureBulkLoadHFilesRequest(
    SecureBulkLoadHFilesRequest request) {
  RegionSpecifier region = ProtobufUtil.buildRegionSpecifier(RegionSpecifierType.REGION_NAME,
      this.env.getRegionInfo().getRegionName());
  BulkLoadHFileRequest.Builder builder = BulkLoadHFileRequest.newBuilder();
  builder.setRegion(region);
  builder.setFsToken(request.getFsToken());
  builder.setBulkToken(request.getBulkToken());
  builder.setAssignSeqNum(request.getAssignSeqNum());
  builder.addAllFamilyPath(request.getFamilyPathList());
  return builder.build();
}
/**
 * Determines if statistics are enabled (which is the default). This is done on the
 * RegionCoprocessorEnvironment for now to allow setting this on a per-table basis, although
 * it could be moved to the general table metadata in the future if there is a realistic
 * use case for that.
 */
private static boolean statisticsEnabled(RegionCoprocessorEnvironment env) {
  boolean enabledInConfig = env.getConfiguration().getBoolean(STATS_COLLECTION_ENABLED,
      DEFAULT_STATS_COLLECTION_ENABLED);
  // Both the cluster/table config switch and the stats-util gate must agree.
  return enabledInConfig && StatisticsUtil.isStatsEnabled(env.getRegionInfo().getTable());
}
@Override public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException { try { String tableName = c.getEnvironment().getRegionInfo().getTable().getNameAsString(); if (tableName.startsWith(MVCC_LOCK_TEST_TABLE_PREFIX)) { Thread.sleep(ROW_LOCK_WAIT_TIME/2); // Wait long enough that they'll both have the same mvcc } } catch (InterruptedException e) { } }
@Override public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws HBaseIOException { // model a slow batch that takes a long time if ((miniBatchOp.size()==100 || SLOW_MUTATE) && c.getEnvironment().getRegionInfo().getTable().getNameAsString().equals(dataTable)) { try { Thread.sleep(6000); } catch (InterruptedException e) { e.printStackTrace(); } } } }
/**
 * Failure simulator: counts the attempt and then unconditionally rejects every batch
 * mutation with a non-retriable write failure naming the target table.
 */
@Override
public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
  attempts.incrementAndGet();
  String tableName = c.getEnvironment().getRegionInfo().getTable().getNameAsString();
  throw new DoNotRetryIOException("Simulating write failure on " + tableName);
}
}
@Override public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException { // we need to advance the clock, since the index retry logic (copied from HBase) has a time component EnvironmentEdge delegate = EnvironmentEdgeManager.getDelegate(); if (delegate instanceof MyClock) { MyClock myClock = (MyClock) delegate; myClock.time += 1000; } throw new DoNotRetryIOException("Simulating write failure on " + c.getEnvironment().getRegionInfo().getTable().getNameAsString()); } }