/**
 * Coprocessor endpoint that exports the region's data described by the request's Scan.
 * Registers ResultSerialization on a private copy of the configuration, optionally extracts a
 * delegation token from the request, runs the export, and returns the response via the callback.
 * On IOException the error is attached to the controller instead of being thrown.
 */
@Override
public void export(RpcController controller, ExportProtos.ExportRequest request,
    RpcCallback<ExportProtos.ExportResponse> done) {
  Region region = env.getRegion();
  // Copy the config so adding ResultSerialization does not mutate the shared environment config.
  Configuration conf = HBaseConfiguration.create(env.getConfiguration());
  conf.setStrings("io.serializations", conf.get("io.serializations"),
      ResultSerialization.class.getName());
  try {
    Scan scan = validateKey(region.getRegionInfo(), request);
    Token userToken = null;
    if (userProvider.isHadoopSecurityEnabled()) {
      if (request.hasFsToken()) {
        // Rebuild the filesystem delegation token shipped inside the protobuf request.
        userToken = new Token(request.getFsToken().getIdentifier().toByteArray(),
            request.getFsToken().getPassword().toByteArray(),
            new Text(request.getFsToken().getKind()),
            new Text(request.getFsToken().getService()));
      } else {
        // Fixed previously garbled message ("Hadoop security is enable, but no found of user token").
        LOG.warn("Hadoop security is enabled, but no user token was found in the request");
      }
    }
    ExportProtos.ExportResponse response = processData(region, conf, userProvider, scan,
        userToken, getWriterOptions(conf, region.getRegionInfo(), request));
    done.run(response);
  } catch (IOException e) {
    // Report the failure to the RPC client; coprocessor endpoints must not throw.
    CoprocessorRpcUtils.setControllerException(controller, e);
    LOG.error(e.toString(), e);
  }
}
/**
 * Before a bulk-loaded store file is committed, records the (staged path, final path) pairs in
 * the replication queue so the HFiles can be shipped to replication peers.
 * No-op when there is nothing to record or bulk-load replication is disabled in the config.
 */
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
    justification="NPE should never happen; if it does it is a bigger issue")
public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
    final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
  RegionCoprocessorEnvironment env = ctx.getEnvironment();
  Configuration c = env.getConfiguration();
  // Guard: nothing to record, or bulk-load replication turned off entirely.
  if (pairs == null || pairs.isEmpty()
      || !c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
          HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) {
    LOG.debug("Skipping recording bulk load entries in preCommitStoreFile for bulkloaded "
        + "data replication.");
    return;
  }
  // This is completely cheating AND getting a HRegionServer from a RegionServerEnvironment is
  // just going to break. This is all private. Not allowed. Regions shouldn't assume they are
  // hosted in a RegionServer. TODO: fix.
  RegionServerServices rss = ((HasRegionServerServices)env).getRegionServerServices();
  Replication rep = (Replication)((HRegionServer)rss).getReplicationSourceService();
  rep.addHFileRefsToQueue(env.getRegionInfo().getTable(), family, pairs);
}
// NOTE(review): this closing brace ends the enclosing class, whose header is outside this chunk.
}
/**
 * Acquires the ZooKeeper-backed cache shared across coprocessor instances on this region
 * server, creating the shared holder lazily on first use. Any failure (including from the
 * holder construction) is rewrapped as an IOException per the coprocessor contract.
 */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment) env;
  try {
    Object holder = regionEnv.getSharedData().computeIfAbsent(ZKKEY, key -> {
      // Configuration is read inside the factory so it only happens on first creation.
      Configuration conf = regionEnv.getConfiguration();
      String ensemble = conf.get(ZK_ENSEMBLE_KEY);
      int sessionTimeout = conf.getInt(ZK_SESSION_TIMEOUT_KEY, ZK_SESSION_TIMEOUT_DEFAULT);
      return new ZKDataHolder(ensemble, sessionTimeout);
    });
    this.cache = ((ZKDataHolder) holder).acquire();
  } catch (Exception e) {
    throw new IOException(e);
  }
}
/****************************** Region related hooks ******************************/ @Override public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) { // Read the entire labels table and populate the zk if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) { this.labelsRegion = true; synchronized (this) { this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors() .contains(AccessController.class.getName()); } initVisibilityLabelService(e.getEnvironment()); } else { checkAuths = e.getEnvironment().getConfiguration() .getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false); initVisibilityLabelService(e.getEnvironment()); } }
/**
 * Brings up the token server's supporting services: a ZooKeeper watcher using a config forced
 * to "simple" auth, the RPC server, and a mocked region coprocessor environment that is handed
 * to the superclass. NOTE(review): super.start() is invoked only after the RPC server is
 * running and the mocks are wired — preserve this statement order.
 */
private void initialize() throws IOException {
  // ZK configuration must _not_ have hbase.security.authentication or it will require SASL auth
  Configuration zkConf = new Configuration(conf);
  zkConf.set(User.HBASE_SECURITY_CONF_KEY, "simple");
  this.zookeeper = new ZKWatcher(zkConf, TokenServer.class.getSimpleName(), this, true);
  this.rpcServer.start();
  // Mock up region coprocessor environment; extraInterfaces lets the mock also be cast to
  // HasRegionServerServices below.
  RegionCoprocessorEnvironment mockRegionCpEnv = mock(RegionCoprocessorEnvironment.class,
      Mockito.withSettings().extraInterfaces(HasRegionServerServices.class));
  when(mockRegionCpEnv.getConfiguration()).thenReturn(conf);
  // Answer (not thenReturn) so the context class loader is resolved at call time, per call.
  when(mockRegionCpEnv.getClassLoader()).then(
      (var1) -> Thread.currentThread().getContextClassLoader());
  RegionServerServices mockRss = mock(RegionServerServices.class);
  when(mockRss.getRpcServer()).thenReturn(rpcServer);
  when(((HasRegionServerServices) mockRegionCpEnv).getRegionServerServices())
      .thenReturn(mockRss);
  super.start(mockRegionCpEnv);
  started = true;
}
// NOTE(review): this chunk is truncated — the anonymous Answer opened at thenAnswer(...) is cut
// off right after "@Override", and the LocalTableState lines below belong to a later part of the
// test. Recover the full method from the original test class before editing; do not guess the
// missing Answer body.
@SuppressWarnings("unchecked")
@Test
public void testCorrectOrderingWithLazyLoadingColumns() throws Exception {
Put m = new Put(row);
// Older HBase API: Put.add(byte[], byte[], long, byte[]) (deprecated in later versions).
m.add(fam, qual, ts, val);
Configuration conf = new Configuration(false);
// Mocked environment/region so the table state reads through the stubbed scanner.
RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
Mockito.when(env.getConfiguration()).thenReturn(conf);
Mockito.when(env.getRegion()).thenReturn(region);
RegionScanner scanner = Mockito.mock(RegionScanner.class);
Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
final byte[] stored = Bytes.toBytes("stored-value");
Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
@Override
LocalTableState table = new LocalTableState(env, state, m);
table.addPendingUpdates(m.get(fam, qual));
// NOTE(review): truncated chunk — newer-API variant of the test on the previous line
// (Put.addColumn and a LocalTableState(state, m) constructor without env). The anonymous Answer
// is cut off after "@Override"; recover the full method before editing.
@SuppressWarnings("unchecked")
@Test
public void testCorrectOrderingWithLazyLoadingColumns() throws Exception {
Put m = new Put(row);
m.addColumn(fam, qual, ts, val);
Configuration conf = new Configuration(false);
RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
Mockito.when(env.getConfiguration()).thenReturn(conf);
Mockito.when(env.getRegion()).thenReturn(region);
RegionScanner scanner = Mockito.mock(RegionScanner.class);
Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
final byte[] stored = Bytes.toBytes("stored-value");
Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
@Override
LocalTableState table = new LocalTableState(state, m);
table.addPendingUpdates(m.get(fam, qual));
@Override public void start(CoprocessorEnvironment e) throws IOException { if (e instanceof RegionCoprocessorEnvironment) { RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e; Configuration conf = env.getConfiguration(); // co SequentialIdGeneratorObserver-1-Conf Get environment and configuration instances. this.regionName = env.getRegionInfo().getEncodedName(); String family = conf.get("com.larsgeorge.copro.seqidgen.family", "cf1"); this.family = Bytes.toBytes(family); String qualifier = conf.get("com.larsgeorge.copro.seqidgen.qualifier", // co SequentialIdGeneratorObserver-2-Settings Retrieve the settings passed into the configuration. "GENID"); this.qualifier = Bytes.toBytes(qualifier); int startId = conf.getInt("com.larsgeorge.copro.seqidgen.startId", 1); this.delay = conf.getInt("com.larsgeorge.copro.seqidgen.delay", 100); env.getSharedData().putIfAbsent(KEY_ID, new AtomicInteger(startId)); // co SequentialIdGeneratorObserver-3-Gen Set up generator if this has not been done yet on this region server. } else { LOG.warn("Received wrong context."); } }
/**
 * Caches the family and qualifier to add, as configured in the environment, when the
 * coprocessor starts.
 */
@Override
public void start(
    @SuppressWarnings("rawtypes") CoprocessorEnvironment env) throws IOException {
  RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment) env;
  Configuration conf = regionEnv.getConfiguration();
  FAMILY_TO_ADD = Bytes.toBytes(conf.get(FAMILY_TO_ADD_KEY));
  QUALIFIER_TO_ADD = Bytes.toBytes(conf.get(QUALIFIER_TO_ADD_KEY));
}
// NOTE(review): truncated chunk — "(int) (Bytes.toInt(estDistValsBytes) * 1.5f));" is the tail
// of a call whose opening is missing, and the enclosing method signature is not visible.
// Recover the full method before editing; treat everything below as context only.
byte[] estDistValsBytes = scan.getAttribute(ESTIMATED_DISTINCT_VALUES);
if (estDistValsBytes != null) {
// presumably sizing a structure at 1.5x the estimated distinct-value count — TODO confirm caller
(int) (Bytes.toInt(estDistValsBytes) * 1.5f));
RegionCoprocessorEnvironment env = c.getEnvironment();
Configuration conf = env.getConfiguration();
final boolean spillableEnabled = conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
HRegion region = c.getEnvironment().getRegion();
// Pins this thread's MVCC read point to the scanner's — older HBase API.
MultiVersionConsistencyControl.setThreadReadPoint(s.getMvccReadPoint());
region.startRegionOperation();
// NOTE(review): truncated chunk — the "} else {" below has no visible matching "if" (presumably
// a hasQualifier check), and the scanner loop around curVals is missing. Recover the complete
// method before editing.
@Override
public void sum(RpcController controller, SumRequest request, RpcCallback<SumResponse> done) {
Scan scan = new Scan();
scan.addColumn(family, qualifier);
} else {
scan.addFamily(family);
scanner = this.env.getRegion().getScanner(scan);
List<Cell> curVals = new ArrayList<>();
boolean hasMore = false;
for (Cell kv : curVals) {
if (CellUtil.matchingQualifier(kv, qualifier)) {
// Accumulates the int value of each matching cell — the surrounding loop is not visible here.
sumResult += Bytes.toInt(kv.getValueArray(), kv.getValueOffset());
// NOTE(review): truncated chunk — the method signature before "throws IOException {" is missing,
// the "});" closes something not visible, and the do-loop/cell iteration around the cloned
// family/qualifier is incomplete. Recover the full method before editing.
throws IOException {
// Single-row scan over all versions of the requested row.
Scan scan = new Scan().withStartRow(get.getRow()).withStopRow(get.getRow(), true).readAllVersions();
NavigableMap<byte[], NavigableMap<byte[], MutableLong>> sums = new TreeMap<>(Bytes.BYTES_COMPARATOR);
});
List<Cell> cells = new ArrayList<>();
try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) {
boolean moreRows;
do {
byte[] family = CellUtil.cloneFamily(cell);
byte[] qualifier = CellUtil.cloneQualifier(cell);
long value = Bytes.toLong(cell.getValueArray(), cell.getValueOffset());
// presumably sums were pre-populated per family/qualifier — TODO confirm against missing code
sums.get(family).get(qualifier).add(value);
c.bypass();
// NOTE(review): truncated test fragment using older HBase APIs (KeyValue.setMemstoreTS, Put.add,
// LocalTableState(env, state, ...)). The anonymous Answer body and the code producing "p"/"s"
// are missing; recover the full test before editing.
Mockito.when(env.getRegion()).thenReturn(region);
RegionScanner scanner = Mockito.mock(RegionScanner.class);
Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
final KeyValue storedKv = new KeyValue(row, fam, qual, ts, Type.Put, Bytes.toBytes("stored-value"));
// Marks the stored cell as not yet visible to all readers — older memstoreTS API.
storedKv.setMemstoreTS(2);
Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
Put pendingUpdate = new Put(row);
pendingUpdate.add(fam, qual, ts, val);
LocalTableState table = new LocalTableState(env, state, pendingUpdate);
s = p.getFirst();
assertEquals("Lost already loaded update!", storedKv, s.next());
// Exactly one region lookup and one scanner open are expected.
Mockito.verify(env, Mockito.times(1)).getRegion();
Mockito.verify(region, Mockito.times(1)).getScanner(Mockito.any(Scan.class));
// NOTE(review): truncated test fragment — newer-API variant of the previous line
// (KeyValue.setSequenceId, Put.addColumn, LocalTableState(state, ...)). The anonymous Answer
// body and the code producing "p"/"s" are missing; recover the full test before editing.
Mockito.when(env.getRegion()).thenReturn(region);
RegionScanner scanner = Mockito.mock(RegionScanner.class);
Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
final KeyValue storedKv = new KeyValue(row, fam, qual, ts, Type.Put, Bytes.toBytes("stored-value"));
storedKv.setSequenceId(2);
Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
Put pendingUpdate = new Put(row);
pendingUpdate.addColumn(fam, qual, ts, val);
LocalTableState table = new LocalTableState(state, pendingUpdate);
s = p.getFirst();
assertEquals("Lost already loaded update!", storedKv, s.next());
// Exactly one region lookup and one scanner open are expected.
Mockito.verify(env, Mockito.times(1)).getRegion();
Mockito.verify(region, Mockito.times(1)).getScanner(Mockito.any(Scan.class));
// NOTE(review): truncated chunk — the enclosing method (presumably a coprocessor error handler
// receiving "e") and the body of the trailing "else" are missing. Recover the full handler
// before editing.
if (env.getConfiguration().getBoolean(ABORT_ON_ERROR_KEY, DEFAULT_ABORT_ON_ERROR)) {
String tableName = ((RegionCoprocessorEnvironment)env).getRegionInfo().getTable().getNameAsString();
// Logs that the failing coprocessor is being removed from the table rather than aborting.
LOG.error("Removing coprocessor '" + env.toString() + "' from table '"+ tableName + "'", e);
} else {
/**
 * Test hook: after the normal postPut processing, flushes the region when the row "row2" is
 * written and asserts that the memstore data size never goes negative.
 */
@Override
public void postPut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  HRegion targetRegion = (HRegion) c.getEnvironment().getRegion();
  super.postPut(c, put, edit, durability);
  byte[] triggerRow = Bytes.toBytes("row2");
  if (Bytes.equals(put.getRow(), triggerRow)) {
    targetRegion.flush(false);
    Assert.assertTrue(targetRegion.getMemStoreDataSize() >= 0);
  }
}
// Closes the enclosing (test/anonymous) class whose header lies outside this chunk.
}
/**
 * Sets up automatic invalid-list prune state for this region when pruning is enabled in the
 * CConfiguration. Does nothing when the configuration is unavailable or pruning is disabled.
 */
private void initializePruneState(RegionCoprocessorEnvironment env) {
  CConfiguration conf = topicMetadataCache.getCConfiguration();
  if (conf == null) {
    return;
  }
  pruneEnable = conf.getBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE,
      TxConstants.TransactionPruning.DEFAULT_PRUNE_ENABLE);
  if (!Boolean.TRUE.equals(pruneEnable)) {
    return;
  }
  String pruneTable = conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
      TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE);
  long pruneFlushInterval = TimeUnit.SECONDS.toMillis(conf.getLong(
      TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL,
      TxConstants.TransactionPruning.DEFAULT_PRUNE_FLUSH_INTERVAL));
  compactionState = new CompactionState(env, TableName.valueOf(pruneTable), pruneFlushInterval);
  if (LOG.isDebugEnabled()) {
    TableName tableName = env.getRegion().getRegionInfo().getTable();
    LOG.debug(String.format(
        "Automatic invalid list pruning is enabled for table %s:%s. Compaction state "
            + "will be recorded in table %s",
        tableName.getNamespaceAsString(), tableName.getNameAsString(), pruneTable));
  }
}
/**
 * Replaces increment semantics: copies every cell of the Increment into a plain Put stamped
 * with a per-row unique timestamp, writes the Put, and bypasses the default increment path.
 *
 * @return always {@link Result#EMPTY_RESULT}; callers never see the incremented values.
 */
@Override
public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c, Increment increment)
    throws IOException {
  byte[] row = increment.getRow();
  Put put = new Put(row);
  // One timestamp per call keeps all rewritten cells of this increment at the same version.
  long ts = getUniqueTimestamp(row);
  // Only the cell lists are needed; iterate values() directly (entrySet keys were unused).
  for (List<Cell> cells : increment.getFamilyCellMap().values()) {
    for (Cell cell : cells) {
      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row)
          .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())
          .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(),
              cell.getQualifierLength())
          .setValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())
          .setType(Cell.Type.Put).setTimestamp(ts).build());
    }
  }
  c.getEnvironment().getRegion().put(put);
  // Skip the built-in increment processing entirely.
  c.bypass();
  return Result.EMPTY_RESULT;
}
// NOTE(review): truncated chunk — the scanner loop around "results", the declarations of
// lastRow/count, and the response construction/finally block are missing. Recover the full
// endpoint method before editing.
public void getRowCount(RpcController controller, ExampleProtos.CountRequest request,
    RpcCallback<ExampleProtos.CountResponse> done) {
Scan scan = new Scan();
// FirstKeyOnlyFilter returns one cell per row, which is enough for counting rows cheaply.
scan.setFilter(new FirstKeyOnlyFilter());
ExampleProtos.CountResponse response = null;
InternalScanner scanner = null;
try {
scanner = env.getRegion().getScanner(scan);
List<Cell> results = new ArrayList<>();
boolean hasMore = false;
for (Cell kv : results) {
byte[] currentRow = CellUtil.cloneRow(kv);
// Count a row only when it differs from the previously seen row key.
if (lastRow == null || !Bytes.equals(lastRow, currentRow)) {
lastRow = currentRow;
count++;
@Override public void postPut(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c, Put put, org.apache.hadoop.hbase.wal.WALEdit edit, Durability durability) throws java.io.IOException { String tableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString(); if (tableName.equalsIgnoreCase(TABLE_NAME) // create the index after the second batch && Bytes.startsWith(put.getRow(), Bytes.toBytes("varchar200_upsert_select"))) { Runnable r = new Runnable() { @Override public void run() { Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); try (Connection conn = DriverManager.getConnection(getUrl(), props)) { // Run CREATE INDEX call in separate thread as otherwise we block // this thread (not a realistic scenario) and prevent our catchup // query from adding the missing rows. conn.createStatement().execute(INDEX_DDL); } catch (SQLException e) { } } }; new Thread(r).start(); } } }