/**
 * Opens a region-operation scope on {@code region} and remembers the region.
 * The caller (or this wrapper's close path, not visible here) is responsible
 * for the matching {@code closeRegionOperation()} call.
 *
 * @param region the region to guard; a region operation is started on it immediately
 * @throws IOException if the region operation cannot be started (e.g. region closing)
 */
RegionOp(final Region region) throws IOException {
    this.region = region;
    region.startRegionOperation();
}
private long collectStatsInternal() throws IOException { long startTime = EnvironmentEdgeManager.currentTimeMillis(); region.startRegionOperation(); boolean hasMore = false; boolean noErrors = false;
/**
 * Opens a region-operation scope on {@code region} and remembers the region.
 * The caller (or this wrapper's close path, not visible here) is responsible
 * for the matching {@code closeRegionOperation()} call.
 *
 * @param region the region to guard; a region operation is started on it immediately
 * @throws IOException if the region operation cannot be started (e.g. region closing)
 */
RegionOp(final Region region) throws IOException {
    this.region = region;
    region.startRegionOperation();
}
/**
 * Opens a region-operation scope on {@code region} and remembers the region.
 * The caller (or this wrapper's close path, not visible here) is responsible
 * for the matching {@code closeRegionOperation()} call.
 *
 * @param region the region to guard; a region operation is started on it immediately
 * @throws IOException if the region operation cannot be started (e.g. region closing)
 */
RegionOp(final Region region) throws IOException {
    this.region = region;
    region.startRegionOperation();
}
final Tuple firstTuple; final Region region = getRegion(); region.startRegionOperation(); try { Tuple tuple = iterator.next();
boolean acquiredLock = false; try { region.startRegionOperation(); acquiredLock = true; synchronized (scanner) {
boolean acquiredLock = false; try { region.startRegionOperation(); acquiredLock = true; synchronized (scanner) {
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES); MutationList mutations = new MutationList(maxBatchSize); region.startRegionOperation(); byte[] uuidValue = ServerCacheClient.generateId(); synchronized (innerScanner) {
byte[] row = append.getRow(); List<RowLock> locks = Lists.newArrayList(); region.startRegionOperation(); try { ServerUtil.acquireLock(region, row, locks);
final MemoryManager.MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize); final Region region = getRegion(); region.startRegionOperation(); try {
List<RowLock> locks = Lists.newArrayList(); TimeRange tr = increment.getTimeRange(); region.startRegionOperation(); try { ServerUtil.acquireLock(region, row, locks);
region.startRegionOperation(); acquiredLock = true; synchronized (innerScanner) {
/**
 * Flushes the region while holding a region-operation scope.
 *
 * <p>{@code startRegionOperation()} / {@code closeRegionOperation()} bracket the
 * flush; the scope is released in a {@code finally} block so it cannot leak even
 * if the flush throws.
 *
 * @return always {@code null}
 * @throws Exception if starting the region operation or flushing the region fails
 */
@Override
public Void call() throws Exception {
    LOG.debug("Starting region operation on " + region);
    region.startRegionOperation();
    try {
        LOG.debug("Flush region " + region.toString() + " started...");
        // Forced flush (true): write out the memstore regardless of size.
        region.flush(true);
    } finally {
        LOG.debug("Closing region operation on " + region);
        region.closeRegionOperation();
    }
    return null;
}
}
region.startRegionOperation(); try { if (snapshotSkipFlush) {
private long collectStatsInternal() throws IOException { long startTime = EnvironmentEdgeManager.currentTimeMillis(); region.startRegionOperation(); boolean hasMore = false; boolean noErrors = false;
private long collectStatsInternal() throws IOException { long startTime = EnvironmentEdgeManager.currentTimeMillis(); region.startRegionOperation(); boolean hasMore = false; boolean noErrors = false;
final Tuple firstTuple; final Region region = getRegion(); region.startRegionOperation(); try { Tuple tuple = iterator.next();
final MemoryManager.MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize); final Region region = getRegion(); region.startRegionOperation(); try {
boolean forcible = request.getForcible(); long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1; regionA.startRegionOperation(Operation.MERGE_REGION); regionB.startRegionOperation(Operation.MERGE_REGION); if (regionA.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID || regionB.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
requestCount.increment(); Region region = getRegion(request.getRegion()); region.startRegionOperation(Operation.SPLIT_REGION); if (region.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { throw new IOException("Can't split replicas directly. "