@Override
public long addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
    throws ReplicationException, IOException {
  // Audit-log who requested the new peer, with its config and initial state,
  // before handing the work to the procedure framework.
  String initialState = enabled ? "ENABLED" : "DISABLED";
  LOG.info(getClientIdAuditPrefix() + " creating replication peer, id=" + peerId + ", config="
    + peerConfig + ", state=" + initialState);
  // Returns the id of the submitted AddPeerProcedure.
  return executePeerProcedure(new AddPeerProcedure(peerId, peerConfig, enabled));
}
@Override
public long removeReplicationPeer(String peerId) throws ReplicationException, IOException {
  // Audit trail: record which client asked for the removal.
  LOG.info(getClientIdAuditPrefix() + " removing replication peer, id=" + peerId);
  RemovePeerProcedure procedure = new RemovePeerProcedure(peerId);
  return executePeerProcedure(procedure);
}
@Override
public long disableReplicationPeer(String peerId) throws ReplicationException, IOException {
  // Audit trail: record which client asked to disable the peer.
  LOG.info(getClientIdAuditPrefix() + " disable replication peer, id=" + peerId);
  DisablePeerProcedure procedure = new DisablePeerProcedure(peerId);
  return executePeerProcedure(procedure);
}
@Override
public long updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
    throws ReplicationException, IOException {
  // Audit-log the requested config change before submitting the procedure.
  LOG.info(getClientIdAuditPrefix() + " update replication peer config, id=" + peerId
    + ", config=" + peerConfig);
  UpdatePeerConfigProcedure procedure = new UpdatePeerConfigProcedure(peerId, peerConfig);
  return executePeerProcedure(procedure);
}
@Override
public long transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
    throws ReplicationException, IOException {
  // Fix: the original mixed string concatenation with SLF4J "{}" placeholders in a
  // single call. Use a fully parameterized message (consistent with SLF4J idiom and
  // the parameterized logging elsewhere in this codebase); the rendered output is
  // unchanged.
  LOG.info("{} transit current cluster state to {} in a synchronous replication peer id={}",
    getClientIdAuditPrefix(), state, peerId);
  // Returns the id of the submitted state-transition procedure.
  return executePeerProcedure(new TransitPeerSyncReplicationStateProcedure(peerId, state));
}
@Override
public long enableReplicationPeer(String peerId) throws ReplicationException, IOException {
  // Audit trail: record which client asked to enable the peer.
  LOG.info(getClientIdAuditPrefix() + " enable replication peer, id=" + peerId);
  EnablePeerProcedure procedure = new EnablePeerProcedure(peerId);
  return executePeerProcedure(procedure);
}
@Override
public List<ReplicationPeerDescription> listReplicationPeers(String regex)
    throws ReplicationException, IOException {
  // Coprocessor pre-hook, if any coprocessors are installed.
  if (cpHost != null) {
    cpHost.preListReplicationPeers(regex);
  }
  LOG.info(getClientIdAuditPrefix() + " list replication peers, regex=" + regex);
  // A null regex means "list every peer"; otherwise filter by the compiled pattern.
  Pattern filter = (regex == null) ? null : Pattern.compile(regex);
  List<ReplicationPeerDescription> descriptions = this.replicationPeerManager.listPeers(filter);
  if (cpHost != null) {
    cpHost.postListReplicationPeers(regex);
  }
  return descriptions;
}
@Override
public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request)
    throws ServiceException {
  // Audit-log the stop request, then delegate to the master.
  LOG.info(master.getClientIdAuditPrefix() + " stop");
  try {
    master.stopMaster();
  } catch (IOException ioe) {
    // Surface the failure to the RPC client, preserving the cause.
    LOG.error("Exception occurred while stopping master", ioe);
    throw new ServiceException(ioe);
  }
  return StopMasterResponse.newBuilder().build();
}
@Override
public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request)
    throws ServiceException {
  // Audit-log the shutdown request, then delegate to the master.
  LOG.info(master.getClientIdAuditPrefix() + " shutdown");
  try {
    master.shutdown();
  } catch (IOException ioe) {
    // Surface the failure to the RPC client, preserving the cause.
    LOG.error("Exception occurred in HMaster.shutdown()", ioe);
    throw new ServiceException(ioe);
  }
  return ShutdownResponse.newBuilder().build();
}
@Override
public ReplicationPeerConfig getReplicationPeerConfig(String peerId)
    throws ReplicationException, IOException {
  // Coprocessor pre-hook, if any coprocessors are installed.
  if (cpHost != null) {
    cpHost.preGetReplicationPeerConfig(peerId);
  }
  LOG.info(getClientIdAuditPrefix() + " get replication peer config, id=" + peerId);
  // Unknown peer id -> ReplicationPeerNotFoundException.
  ReplicationPeerConfig config = this.replicationPeerManager.getPeerConfig(peerId)
    .orElseThrow(() -> new ReplicationPeerNotFoundException(peerId));
  if (cpHost != null) {
    cpHost.postGetReplicationPeerConfig(peerId);
  }
  return config;
}
@Override protected void run() throws IOException { getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); // Execute the operation asynchronously submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow)); }
@Override public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller, SetNormalizerRunningRequest request) throws ServiceException { rpcPreCheck("setNormalizerRunning"); // Sets normalizer on/off flag in ZK. boolean prevValue = master.getRegionNormalizerTracker().isNormalizerOn(); boolean newValue = request.getOn(); try { master.getRegionNormalizerTracker().setNormalizerOn(newValue); } catch (KeeperException ke) { LOG.warn("Error flipping normalizer switch", ke); } LOG.info("{} set normalizerSwitch={}", master.getClientIdAuditPrefix(), newValue); return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build(); }
@Override
protected void run() throws IOException {
  // Coprocessor pre-hook, then audit-log the truncate request.
  getMaster().getMasterCoprocessorHost().preTruncateTable(tableName);
  LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
  ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch(2, 0);
  submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName,
    preserveSplits, prepareLatch));
  // Block until the procedure releases the latch before running the post-hook.
  prepareLatch.await();
  getMaster().getMasterCoprocessorHost().postTruncateTable(tableName);
}
@Override
protected void run() throws IOException {
  // Coprocessor pre-hook, then audit-log the pair of regions being merged.
  getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge);
  LOG.info(getClientIdAuditPrefix() + " Merge regions " + regionsToMerge[0].getEncodedName()
    + " and " + regionsToMerge[1].getEncodedName());
  submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(),
    regionsToMerge, forcible));
  getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge);
}
@Override protected void run() throws IOException { getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); LOG.info(getClientIdAuditPrefix() + " delete " + tableName); // TODO: We can handle/merge duplicate request // // We need to wait for the procedure to potentially fail due to "prepare" sanity // checks. This will block only the beginning of the procedure. See HBASE-19953. ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); submitProcedure(new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch)); latch.await(); getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); }
@Override protected void run() throws IOException { getMaster().getMasterCoprocessorHost().preCreateTable(desc, newRegions); LOG.info(getClientIdAuditPrefix() + " create " + desc); // TODO: We can handle/merge duplicate requests, and differentiate the case of // TableExistsException by saying if the schema is the same or not. // // We need to wait for the procedure to potentially fail due to "prepare" sanity // checks. This will block only the beginning of the procedure. See HBASE-19953. ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); submitProcedure( new CreateTableProcedure(procedureExecutor.getEnvironment(), desc, newRegions, latch)); latch.await(); getMaster().getMasterCoprocessorHost().postCreateTable(desc, newRegions); }
@Override protected void run() throws IOException { getMaster().getMasterCoprocessorHost().preCreateNamespace(namespaceDescriptor); // We need to wait for the procedure to potentially fail due to "prepare" sanity // checks. This will block only the beginning of the procedure. See HBASE-19953. ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); LOG.info(getClientIdAuditPrefix() + " creating " + namespaceDescriptor); // Execute the operation synchronously - wait for the operation to complete before // continuing. setProcId(getClusterSchema().createNamespace(namespaceDescriptor, getNonceKey(), latch)); latch.await(); getMaster().getMasterCoprocessorHost().postCreateNamespace(namespaceDescriptor); }
@Override
public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
    throws ServiceException {
  // Translate the protobuf schema and split keys into their POJO forms.
  TableDescriptor descriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
  byte[][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
  try {
    long procId =
      master.createTable(descriptor, splitKeys, req.getNonceGroup(), req.getNonce());
    LOG.info(master.getClientIdAuditPrefix() + " procedure request for creating table: "
      + req.getTableSchema().getTableName() + " procId is: " + procId);
    return CreateTableResponse.newBuilder().setProcId(procId).build();
  } catch (IOException ioe) {
    // Surface the failure to the RPC client, preserving the cause.
    throw new ServiceException(ioe);
  }
}
@Override protected void run() throws IOException { getMaster().getMasterCoprocessorHost().preDeleteNamespace(name); LOG.info(getClientIdAuditPrefix() + " delete " + name); // Execute the operation synchronously - wait for the operation to complete before // continuing. // // We need to wait for the procedure to potentially fail due to "prepare" sanity // checks. This will block only the beginning of the procedure. See HBASE-19953. ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); setProcId(submitProcedure( new DeleteNamespaceProcedure(procedureExecutor.getEnvironment(), name, latch))); latch.await(); // Will not be invoked in the face of Exception thrown by the Procedure's execution getMaster().getMasterCoprocessorHost().postDeleteNamespace(name); }
@Override protected void run() throws IOException { NamespaceDescriptor oldNsDescriptor = getNamespace(newNsDescriptor.getName()); getMaster().getMasterCoprocessorHost().preModifyNamespace(oldNsDescriptor, newNsDescriptor); // We need to wait for the procedure to potentially fail due to "prepare" sanity // checks. This will block only the beginning of the procedure. See HBASE-19953. ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); LOG.info(getClientIdAuditPrefix() + " modify " + newNsDescriptor); // Execute the operation synchronously - wait for the operation to complete before // continuing. setProcId(getClusterSchema().modifyNamespace(newNsDescriptor, getNonceKey(), latch)); latch.await(); getMaster().getMasterCoprocessorHost().postModifyNamespace(oldNsDescriptor, newNsDescriptor); }