/**
 * Either logs a warning or throws a {@link DoNotRetryIOException}, depending on the
 * sanity-check configuration.
 * @param logWarn if {@code true}, only log a warning; otherwise throw
 * @param confKey the configuration key the operator can set to bypass sanity checks
 * @param message description of the failed sanity check
 * @param cause underlying exception that triggered the failure (may be null)
 * @throws IOException a DoNotRetryIOException when {@code logWarn} is false
 */
private static void warnOrThrowExceptionForFailure(boolean logWarn, String confKey,
    String message, Exception cause) throws IOException {
  if (!logWarn) {
    throw new DoNotRetryIOException(message + " Set " + confKey
        + " to false at conf or table descriptor if you want to bypass sanity checks", cause);
  }
  // Fix: previously the cause was silently dropped on the warn path; keep it so the
  // underlying failure is visible in the logs.
  LOG.warn(message, cause);
}
/**
 * Looks up a replication peer and fails fast when it is unknown.
 * @param peerId id of the replication peer
 * @return the peer's description
 * @throws DoNotRetryIOException if no peer with the given id exists
 */
private ReplicationPeerDescription checkPeerExists(String peerId) throws DoNotRetryIOException {
  ReplicationPeerDescription description = peers.get(peerId);
  if (description != null) {
    return description;
  }
  throw new DoNotRetryIOException("Replication peer " + peerId + " does not exist");
}
/**
 * Validates that a mutation is of a supported type (Put or Delete) and targets the given row.
 * @param mutation the mutation to validate
 * @param row the row the mutation must target
 * @throws DoNotRetryIOException if the mutation is neither a Put nor a Delete, or if its
 *   row does not match {@code row}
 */
private void checkMutationType(final Mutation mutation, final byte[] row)
    throws DoNotRetryIOException {
  // Consistency fix: use the imported simple name (the throws clause already uses it)
  // instead of the fully-qualified org.apache.hadoop.hbase.DoNotRetryIOException.
  boolean isPut = mutation instanceof Put;
  if (!isPut && !(mutation instanceof Delete)) {
    throw new DoNotRetryIOException("Action must be Put or Delete");
  }
  if (!Bytes.equals(row, mutation.getRow())) {
    throw new DoNotRetryIOException("Action's getRow must match");
  }
}
@Override public void processRequest(ByteBuff buf) throws IOException, InterruptedException { // this will throw exception after the connection header is read, and an RPC is sent // from client throw new DoNotRetryIOException("Failing for test"); } }
@Override
public Result preAppend(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Append append) throws IOException {
  // Test observer: every Append is rejected with a non-retriable failure.
  throw new DoNotRetryIOException("Call failed and don't retry");
}
}
/**
 * Fails fast when this instance has already been closed.
 * @throws DoNotRetryIOException if {@code close()} was called previously
 */
private void checkClosed() throws DoNotRetryIOException {
  if (!this.closed) {
    return;
  }
  throw new DoNotRetryIOException(toString() + " closed");
}
@Override
public Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Increment increment) throws IOException {
  // Test observer: every Increment is rejected with a non-retriable failure.
  throw new DoNotRetryIOException("Call failed and don't retry");
}
/**
 * Rejects mutations when the region is in read-only mode.
 * @throws IOException a DoNotRetryIOException if the region is read only
 */
protected void checkReadOnly() throws IOException {
  if (!isReadOnly()) {
    return;
  }
  throw new DoNotRetryIOException("region is read only");
}
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  // Test observer: every Put is rejected with a non-retriable failure.
  throw new DoNotRetryIOException("Call failed and don't retry");
}
@Override
public void preDelete(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Delete delete, final WALEdit edit, final Durability durability) throws IOException {
  // Test observer: every Delete is rejected with a non-retriable failure.
  throw new DoNotRetryIOException("Call failed and don't retry");
}
/**
 * Validates a ZooKeeper cluster key, converting a validation failure into a
 * non-retriable exception.
 * @param clusterKey the cluster key to validate
 * @throws DoNotRetryIOException if the cluster key is malformed
 */
private void checkClusterKey(String clusterKey) throws DoNotRetryIOException {
  try {
    ZKConfig.validateClusterKey(clusterKey);
  } catch (IOException validationFailure) {
    throw new DoNotRetryIOException("Invalid cluster key: " + clusterKey, validationFailure);
  }
}
@Override public void processRequest(ByteBuff buf) throws IOException, InterruptedException { // this will throw exception after the connection header is read, and an RPC is sent // from client throw new DoNotRetryIOException("Failing for test"); } }
/**
 * Validates a throttle quota definition.
 * @param timedQuota the quota to validate
 * @throws IOException a DoNotRetryIOException if the soft limit is not positive
 */
private void validateTimedQuota(final TimedQuota timedQuota) throws IOException {
  if (timedQuota.getSoftLimit() < 1) {
    // Fix: user-facing message typo "greater then 0" -> "greater than 0".
    throw new DoNotRetryIOException(new UnsupportedOperationException(
        "The throttle limit must be greater than 0, got " + timedQuota.getSoftLimit()));
  }
}
/**
 * Ensures none of the tables in the new peer config are already covered by an existing
 * synchronous replication peer — a table may be synchronously replicated by at most one peer.
 * @param peerConfig the peer configuration being added
 * @throws DoNotRetryIOException if a table is already replicated by a sync peer
 */
private void checkSyncReplicationPeerConfigConflict(ReplicationPeerConfig peerConfig)
    throws DoNotRetryIOException {
  for (TableName table : peerConfig.getTableCFsMap().keySet()) {
    for (Map.Entry<String, ReplicationPeerDescription> peerEntry : peers.entrySet()) {
      ReplicationPeerConfig existing = peerEntry.getValue().getPeerConfig();
      if (existing.isSyncReplication() && existing.getTableCFsMap().containsKey(table)) {
        throw new DoNotRetryIOException(
          "Table " + table + " has been replicated by peer " + peerEntry.getKey());
      }
    }
  }
}
/**
 * Pre-check for disabling a replication peer: the peer must exist and still be enabled.
 * @param peerId id of the peer to disable
 * @throws DoNotRetryIOException if the peer is missing or already disabled
 */
void preDisablePeer(String peerId) throws DoNotRetryIOException {
  ReplicationPeerDescription description = checkPeerExists(peerId);
  if (description.isEnabled()) {
    return;
  }
  throw new DoNotRetryIOException("Replication peer " + peerId + " has already been disabled");
}
private void checkReplicationScope(ColumnFamilyDescriptor hcd) throws IOException{ // check replication scope WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(hcd.getScope()); if (scop == null) { String message = "Replication scope for column family " + hcd.getNameAsString() + " is " + hcd.getScope() + " which is invalid."; LOG.error(message); throw new DoNotRetryIOException(message); } }
/**
 * Rejects client requests against a region whose replication state is STANDBY.
 * @param region the region the request targets
 * @throws DoNotRetryIOException if requests to this region must be rejected
 */
private void rejectIfInStandByState(HRegion region) throws DoNotRetryIOException {
  if (!shouldRejectRequestsFromClient(region)) {
    return;
  }
  throw new DoNotRetryIOException(
    region.getRegionInfo().getRegionNameAsString() + " is in STANDBY state.");
}
/**
 * For a synchronous replication peer, ensures it is in DOWNGRADE_ACTIVE state before it
 * may be removed. Non-sync peers (or unknown peer ids) pass the check.
 * @param peerId id of the peer being removed
 * @throws DoNotRetryIOException if the peer is a sync peer not in DOWNGRADE_ACTIVE state
 */
private void checkPeerInDAStateIfSyncReplication(String peerId) throws DoNotRetryIOException {
  ReplicationPeerDescription description = peers.get(peerId);
  boolean blockingState = description != null
    && description.getPeerConfig().isSyncReplication()
    && !SyncReplicationState.DOWNGRADE_ACTIVE.equals(description.getSyncReplicationState());
  if (blockingState) {
    throw new DoNotRetryIOException("Couldn't remove synchronous replication peer with state="
      + description.getSyncReplicationState()
      + ", Transit the synchronous replication state to be DOWNGRADE_ACTIVE firstly.");
  }
}
/**
 * Registers a new region server group, rejecting duplicates and the reserved default
 * group name, then persists the updated group map.
 * @param rsGroupInfo the group to add
 * @throws IOException if the name is invalid, already taken, or the flush fails
 */
@Override
public synchronized void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException {
  String groupName = rsGroupInfo.getName();
  checkGroupName(groupName);
  // The default group is implicit and may never be re-registered.
  if (rsGroupMap.get(groupName) != null || groupName.equals(RSGroupInfo.DEFAULT_GROUP)) {
    throw new DoNotRetryIOException("Group already exists: " + groupName);
  }
  // Copy-modify-flush so the in-memory map is only swapped after a successful persist.
  Map<String, RSGroupInfo> updatedGroupMap = Maps.newHashMap(rsGroupMap);
  updatedGroupMap.put(groupName, rsGroupInfo);
  flushConfig(updatedGroupMap);
}
@Test public void testDoNotRetryExceptionOnAssignment() throws Exception { // collect AM metrics before test collectAssignmentManagerMetrics(); testFailedOpen(TableName.valueOf("testDoNotRetryExceptionOnAssignment"), new FaultyRsExecutor(new DoNotRetryIOException("test do not retry fault"))); assertEquals(assignSubmittedCount + 1, assignProcMetrics.getSubmittedCounter().getCount()); assertEquals(assignFailedCount + 1, assignProcMetrics.getFailedCounter().getCount()); }