/**
 * Resolves the replace-datanode-on-failure policy from configuration.
 *
 * <p>If the feature is disabled via the enable key, returns
 * {@link Policy#DISABLE}. Otherwise the configured policy name is matched
 * case-insensitively against the non-DISABLE {@code Policy} constants.
 *
 * @param conf the configuration to read
 * @return the resolved policy
 * @throws HadoopIllegalArgumentException if the configured name matches no policy
 */
private static Policy getPolicy(final Configuration conf) {
  final boolean enabled = conf.getBoolean(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT);
  if (!enabled) {
    return Policy.DISABLE;
  }

  final String policy = conf.get(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT);
  // Hoisted: each Policy.values() call clones the backing array, and the
  // original invoked it twice per iteration.
  final Policy[] policies = Policy.values();
  // Start at index 1 to skip the ordinal-0 constant (DISABLE), which is
  // selectable only through the enable key above.
  for (int i = 1; i < policies.length; i++) {
    final Policy p = policies[i];
    if (p.name().equalsIgnoreCase(policy)) {
      return p;
    }
  }
  throw new HadoopIllegalArgumentException("Illegal configuration value for "
      + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY
      + ": " + policy);
}
/**
 * Looks up the replace-datanode-on-failure policy configured in {@code conf}.
 *
 * <p>Returns {@link Policy#DISABLE} when the feature is switched off;
 * otherwise matches the configured name (ignoring case) against the
 * policies other than DISABLE.
 *
 * @param conf source configuration
 * @return the matching policy
 * @throws HadoopIllegalArgumentException for an unrecognized policy name
 */
private static Policy getPolicy(final Configuration conf) {
  if (!conf.getBoolean(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT)) {
    return Policy.DISABLE;
  }

  final String configured = conf.get(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT);
  // Index starts at 1: the constant at ordinal 0 (DISABLE) is only reachable
  // through the enable key handled above.
  for (int idx = 1; idx < Policy.values().length; idx++) {
    final Policy candidate = Policy.values()[idx];
    if (candidate.name().equalsIgnoreCase(configured)) {
      return candidate;
    }
  }
  throw new HadoopIllegalArgumentException("Illegal configuration value for "
      + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY
      + ": " + configured);
}
/**
 * Resolves the replace-datanode-on-failure policy from configuration.
 *
 * <p>If the feature is disabled via the enable key, returns
 * {@link Policy#DISABLE}. Otherwise the configured policy name is matched
 * case-insensitively against the non-DISABLE {@code Policy} constants.
 *
 * @param conf the configuration to read
 * @return the resolved policy
 * @throws HadoopIllegalArgumentException if the configured name matches no policy
 */
private static Policy getPolicy(final Configuration conf) {
  final boolean enabled = conf.getBoolean(
      HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY,
      HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_DEFAULT);
  if (!enabled) {
    return Policy.DISABLE;
  }

  final String policy = conf.get(
      HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY,
      HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_DEFAULT);
  // Hoisted: each Policy.values() call clones the backing array, and the
  // original invoked it twice per iteration.
  final Policy[] policies = Policy.values();
  // Start at index 1 to skip the ordinal-0 constant (DISABLE), which is
  // selectable only through the enable key above.
  for (int i = 1; i < policies.length; i++) {
    final Policy p = policies[i];
    if (p.name().equalsIgnoreCase(policy)) {
      return p;
    }
  }
  throw new HadoopIllegalArgumentException("Illegal configuration value for "
      + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY
      + ": " + policy);
}
/**
 * Writes the replace-datanode-on-failure settings back to a configuration.
 *
 * @param policy policy to persist; {@link Policy#DISABLE} clears the enable flag
 * @param bestEffort whether best-effort mode is on
 * @param conf configuration to mutate
 */
public static void write(final Policy policy,
    final boolean bestEffort, final Configuration conf) {
  // DISABLE is represented by the enable flag, not by the policy name.
  final boolean enabled = policy != Policy.DISABLE;
  conf.setBoolean(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
      enabled);
  conf.set(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
      policy.name());
  conf.setBoolean(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY,
      bestEffort);
}
}
/**
 * Persists the replace-datanode-on-failure settings into {@code conf}.
 *
 * @param policy the policy to record; DISABLE turns the feature off
 * @param bestEffort the best-effort flag value
 * @param conf the configuration to update
 */
public static void write(final Policy policy,
    final boolean bestEffort, final Configuration conf) {
  // The feature is "enabled" for every policy except DISABLE.
  conf.setBoolean(
      HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY,
      policy != Policy.DISABLE);
  // Record the policy by its enum constant name.
  conf.set(
      HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY,
      policy.name());
  // Best-effort flag is stored independently of the policy choice.
  conf.setBoolean(
      HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.BEST_EFFORT_KEY,
      bestEffort);
}
}
/**
 * Stores the given replace-datanode-on-failure settings in a configuration.
 *
 * @param policy policy to store; {@link Policy#DISABLE} disables the feature
 * @param bestEffort best-effort flag to store
 * @param conf target configuration
 */
public static void write(final Policy policy,
    final boolean bestEffort, final Configuration conf) {
  final boolean featureOn = policy != Policy.DISABLE;
  // Enable flag first, then policy name, then the best-effort flag.
  conf.setBoolean(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
      featureOn);
  conf.set(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
      policy.name());
  conf.setBoolean(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY,
      bestEffort);
}
}
/** Returns the string form of the configured policy. */
@Override
public String toString() {
  return this.policy.toString();
}
/** Delegates to the underlying policy's string representation. */
@Override
public String toString() {
  final Policy current = policy;
  return current.toString();
}
/** Renders this setting as the name of its policy. */
@Override
public String toString() {
  final String rendered = policy.toString();
  return rendered;
}
/** Does it need a replacement according to the policy? */ public boolean satisfy( final short replication, final DatanodeInfo[] existings, final boolean isAppend, final boolean isHflushed) { final int n = existings == null? 0: existings.length; if (n == 0 || n >= replication) { //don't need to add datanode for any policy. return false; } else { return policy.getCondition().satisfy( replication, existings, n, isAppend, isHflushed); } }
/** Does it need a replacement according to the policy? */ public boolean satisfy( final short replication, final DatanodeInfo[] existings, final boolean isAppend, final boolean isHflushed) { final int n = existings == null ? 0 : existings.length; //don't need to add datanode for any policy. return !(n == 0 || n >= replication) && policy.getCondition().satisfy(replication, existings, n, isAppend, isHflushed); }
/** Does it need a replacement according to the policy? */ public boolean satisfy( final short replication, final DatanodeInfo[] existings, final boolean isAppend, final boolean isHflushed) { final int n = existings == null? 0: existings.length; if (n == 0 || n >= replication) { //don't need to add datanode for any policy. return false; } else { return policy.getCondition().satisfy( replication, existings, n, isAppend, isHflushed); } }