/**
 * Per-test setup: shortens the IPC ping interval, grabs the mini-cluster's
 * FileSystem, and creates the working directories used by the tests.
 *
 * @throws IOException if the FileSystem cannot be obtained or a directory
 *     cannot be created
 */
@Before
public void setup() throws IOException {
  conf.set(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, "5000");
  fs = dfsClusterRule.getDfscluster().getFileSystem();
  // Java's `assert` is a no-op unless the JVM runs with -ea, so a failed
  // mkdirs would previously pass silently; fail loudly instead.
  if (!fs.mkdirs(filesDir)) {
    throw new IOException("Failed to create test directory: " + filesDir);
  }
  if (!fs.mkdirs(locksDir)) {
    throw new IOException("Failed to create test directory: " + locksDir);
  }
}
/**
 * Builds a test HdfsConfiguration rooted at a freshly created, randomly
 * named directory under {@code TEST_PATH}, so concurrent or repeated test
 * runs never collide on on-disk state.
 *
 * @return a configuration suitable for a single-replica MiniDFSCluster with
 *     no reserved disk space and no minimum block size
 */
public static HdfsConfiguration getTestHdfsConfiguration() {
  HdfsConfiguration conf = new HdfsConfiguration();
  // Unique base directory per invocation; "name" subdir holds both the
  // NameNode image and its edit logs.
  File baseDir = new File(TEST_PATH + "/" + UUID.randomUUID());
  File nameDir = new File(baseDir.getAbsolutePath(), "name");
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 0);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  return conf;
}
@Test public void testFailoverWithAutoHa() throws Exception { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus(); // Turn on auto-HA in the config HdfsConfiguration conf = getHAConf(); conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true); conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand()); tool.setConf(conf); assertEquals(0, runTool("-failover", "nn1", "nn2")); Mockito.verify(mockZkfcProtocol).gracefulFailover(); }
/**
 * Per-test setup: shortens the IPC ping interval, grabs the mini-cluster's
 * FileSystem, and creates the working directory used by the tests.
 *
 * @throws IOException if the FileSystem cannot be obtained or the directory
 *     cannot be created
 */
@Before
public void setup() throws IOException {
  conf.set(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, "5000");
  fs = dfsClusterRule.getDfscluster().getFileSystem();
  // Java's `assert` is a no-op unless the JVM runs with -ea, so a failed
  // mkdirs would previously pass silently; fail loudly instead.
  if (!fs.mkdirs(dir)) {
    throw new IOException("Failed to create test directory: " + dir);
  }
}
/**
 * Per-test setup: shortens the IPC ping interval, grabs the mini-cluster's
 * FileSystem, and creates the locks directory used by the tests.
 *
 * @throws IOException if the FileSystem cannot be obtained or the directory
 *     cannot be created
 */
@Before
public void setUp() throws IOException {
  conf.set(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, "5000");
  fs = DFS_CLUSTER_RULE.getDfscluster().getFileSystem();
  // Java's `assert` is a no-op unless the JVM runs with -ea, so a failed
  // mkdirs would previously pass silently; fail loudly instead.
  if (!fs.mkdirs(locksDir)) {
    throw new IOException("Failed to create test directory: " + locksDir);
  }
}
/**
 * Returns a fresh test configuration; when dual-port testing is enabled, the
 * NameNode additionally exposes a separate service RPC endpoint.
 */
private HdfsConfiguration getTestConfiguration() {
  HdfsConfiguration config = new HdfsConfiguration();
  if (dualPortTesting) {
    // Port 0 lets the OS pick an ephemeral port, avoiding clashes.
    config.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        "localhost:0");
  }
  return config;
}
/**
 * Assembles an HA client configuration for WebHDFS: a single nameservice
 * "ns1" with two NameNodes at the supplied HTTP addresses, and a
 * ConfiguredFailoverProxyProvider bound to the given logical host name.
 *
 * @param logicalHostName logical authority clients will put in their URI
 * @param nnaddr1 HTTP address of NameNode "nn1"
 * @param nnaddr2 HTTP address of NameNode "nn2"
 * @return the populated configuration
 */
private static Configuration createWebHDFSHAConfiguration(
    String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),
      "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"),
      nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"),
      nnaddr2);
  // Route the logical host name through the failover proxy provider.
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
/**
 * A NameNode backed by QJM must start even when the secondary NameNode's
 * HTTP address is set to the literal string "null".
 */
@Test
public void testSecondaryNameNodeHttpAddressNotNeeded() throws Exception {
  // Deliberately the string "null", not an unset key.
  conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "null");
  doNNWithQJMTest();
}
/**
 * Passing an unexpected positional argument after "-namenodes" must produce
 * an error message naming the offending argument.
 */
@Test(timeout = 10000)
public void testExtraArgsThrowsError() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set("mykey", "myval");
  String[] args = {"-namenodes", "unexpected-arg"};
  String output = runTool(conf, args, false);
  assertTrue(output.contains("Did not expect argument: unexpected-arg"));
}
/**
 * A forced-fence failover must fail (exit code -1) when the configured
 * fencer is unparseable.
 */
@Test
public void testFailoverWithFenceAndBadFencer() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  // "foobar!" is not a valid fence method specification.
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "foobar!");
  tool.setConf(conf);

  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
/**
 * A forced-fence failover must succeed when a valid (always-true) fencer is
 * configured.
 */
@Test
public void testFailoverWithFencerConfiguredAndForce() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);

  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
/**
 * A failover with "--forceactive" must succeed when a valid fencer is
 * configured.
 */
@Test
public void testFailoverWithForceActive() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);

  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forceactive"));
}
/**
 * A failover must succeed when the target nameservice is selected
 * explicitly via the "-ns" option.
 */
@Test
public void testFailoverWithFencerAndNameservice() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);

  assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2"));
}
/**
 * The "--forcefence" flag must be accepted even when it appears before the
 * NameNode arguments rather than after them.
 */
@Test
public void testForceFenceOptionListedBeforeArgs() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);

  // Flag deliberately placed before the positional arguments.
  assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
}
/**
 * A client requesting "integrity" QOP must interoperate with a cluster that
 * offers authentication, integrity, and privacy.
 */
@Test
public void testIntegrity() throws Exception {
  final String clientQop = "integrity";
  HdfsConfiguration clusterConf =
      createSecureConfig("authentication,integrity,privacy");
  startCluster(clusterConf);
  // Client inherits the cluster config but narrows its own QOP.
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, clientQop);
  doTest(clientConf);
}
/**
 * A client requesting "privacy" QOP must interoperate with a cluster that
 * offers authentication, integrity, and privacy.
 */
@Test
public void testPrivacy() throws Exception {
  final String clientQop = "privacy";
  HdfsConfiguration clusterConf =
      createSecureConfig("authentication,integrity,privacy");
  startCluster(clusterConf);
  // Client inherits the cluster config but narrows its own QOP.
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, clientQop);
  doTest(clientConf);
}
/**
 * A client requesting "authentication" QOP must interoperate with a cluster
 * that offers authentication, integrity, and privacy.
 */
@Test
public void testAuthentication() throws Exception {
  final String clientQop = "authentication";
  HdfsConfiguration clusterConf =
      createSecureConfig("authentication,integrity,privacy");
  startCluster(clusterConf);
  // Client inherits the cluster config but narrows its own QOP.
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, clientQop);
  doTest(clientConf);
}
/**
 * A secure DataNode must refuse to start when the HTTP policy is
 * HTTP_AND_HTTPS rather than HTTPS-only.
 */
@Test
public void testDataNodeAbortsIfNotHttpsOnly() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig("authentication");
  clusterConf.set(DFS_HTTP_POLICY_KEY,
      HttpConfig.Policy.HTTP_AND_HTTPS.name());
  // Expectations must be registered before the failing start attempt.
  exception.expect(RuntimeException.class);
  exception.expectMessage("Cannot start secure DataNode");
  startCluster(clusterConf);
}
/**
 * When the client and the cluster share no common QOP (client wants
 * "authentication", cluster only offers "privacy"), writes must fail with
 * an under-replication error.
 */
@Test
public void testClientAndServerDoNotHaveCommonQop() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig("privacy");
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
  // Negotiation failure surfaces as an inability to place any replicas.
  exception.expect(IOException.class);
  exception.expectMessage("could only be replicated to 0 nodes");
  doTest(clientConf);
}