if (count > checkpointConf.getTxnCount() || opts.shouldForceCheckpoint()) { doCheckpoint(); } else { System.err.println("EditLog size " + count + " transactions is " +
doCheckpoint(); lastCheckpointTime = monotonicNow; lastCheckpointWallclockTime = now;
// Checkpoint worker: runs a secondary-namenode checkpoint on this thread and
// records any failure in the enclosing class's 'thrown' field so the thread
// that started us can inspect it after join(). Catching Throwable (not just
// Exception) is deliberate — assertion errors inside doCheckpoint() must also
// be propagated back to the test thread rather than silently killing this one.
// NOTE(review): 'snn' is presumably the SecondaryNameNode under test — confirm
// against the enclosing class, which is not visible in this chunk.
@Override public void run() { try { snn.doCheckpoint(); } catch (Throwable t) { thrown = t; } }
if (size >= checkpointSize || now >= lastCheckpointTime + 1000 * checkpointPeriod) { doCheckpoint(); lastCheckpointTime = now;
if (size >= checkpointSize || now >= lastCheckpointTime + 1000 * checkpointPeriod) { doCheckpoint(); lastCheckpointTime = now;
sn.doCheckpoint(); // this shouldn't fail LOG.info("--done checkpoint"); } catch (IOException e) {
snn.doCheckpoint(); snn.doCheckpoint(); } finally { if (fs != null) fs.close();
secondary.doCheckpoint(); String files1[] = tmpDir.list(); assertEquals("Only one file is expected", 1, files1.length); secondary.doCheckpoint(); secondary.doCheckpoint(); String files2[] = tmpDir.list(); assertEquals("Two files are expected", 2, files2.length);
cluster.waitActive(); secondary = startSecondaryNameNode(conf); secondary.doCheckpoint(); assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ", checkpointNameDir1.exists());
fos = fs.create(new Path("tmpfile0")); fos.write(new byte[] { 0, 1, 2, 3 }); secondary.doCheckpoint(); fos.write(new byte[] { 0, 1, 2, 3 }); fos.hsync(); secondary.doCheckpoint(); fail("Fault injection failed."); } catch (IOException ioe) { secondary.doCheckpoint()); } finally { if (fs != null) {
LOG.info("--file " + p.toString() + " created"); LOG.info("--doing checkpoint"); sn.doCheckpoint(); // this shouldn't fail LOG.info("--done checkpoint");
secondary.doCheckpoint(); fail("Fault injection failed."); } catch (IOException ioe) { secondary.doCheckpoint(); } finally { if (secondary != null) {
dfs.mkdirs(new Path("/test/foo")); snn.doCheckpoint(); snn.doCheckpoint(); } finally { IOUtils.cleanup(null, dfs);
secondary.doCheckpoint();
secondary.doCheckpoint(); // this should fail fail("Checkpoint succeeded even though we injected an error!"); } catch (IOException e) {
secondary.doCheckpoint(); secondary.doCheckpoint();
secondary.doCheckpoint(); secondary.doCheckpoint(); secondary.doCheckpoint();
secondary.doCheckpoint(); secondary.doCheckpoint(); } finally { cleanup(secondary);
secondary.doCheckpoint(); assertEquals(1, secondary.getFSNamesystem().getLeaseManager().countLease()); fos.close(); secondary.doCheckpoint();
@Test public void testCheckpointSignature() throws IOException { MiniDFSCluster cluster = null; Configuration conf = new HdfsConfiguration(); SecondaryNameNode secondary = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) .format(true).build(); NameNode nn = cluster.getNameNode(); NamenodeProtocols nnRpc = nn.getRpcServer(); secondary = startSecondaryNameNode(conf); // prepare checkpoint image secondary.doCheckpoint(); CheckpointSignature sig = nnRpc.rollEditLog(); // manipulate the CheckpointSignature fields sig.setBlockpoolID("somerandomebpid"); sig.clusterID = "somerandomcid"; try { sig.validateStorageInfo(nn.getFSImage()); // this should fail assertTrue("This test is expected to fail.", false); } catch (Exception ignored) { } } finally { cleanup(secondary); secondary = null; cleanup(cluster); cluster = null; } }