// NOTE(review): garbled fragment of the SecondaryNameNode startup path. The
// "opts == null" (error) branch constructs and runs a SecondaryNameNode, while
// the else branch calls methods on "secondary" that is declared inside the
// OTHER branch; the braces also do not balance. This looks like two variants
// of main()/startup logic spliced together — reconcile against the full file
// before making any behavioral change here.
CommandLineOpts opts = SecondaryNameNode.parseArgs(argv); if (opts == null) { LOG.error("Failed to parse options"); Configuration tconf = new HdfsConfiguration(); SecondaryNameNode secondary = null; secondary = new SecondaryNameNode(tconf, opts); int ret = secondary.processStartupCommand(opts); terminate(ret); } else { secondary.startInfoServer(); secondary.startCheckpointThread(); secondary.join();
/**
 * Create a SecondaryNameNode from the given configuration and parsed
 * command-line options.
 *
 * Refuses to start in an HA cluster: with HA enabled the Standby NameNode
 * performs checkpointing, so a SecondaryNameNode must not run.
 *
 * @param conf configuration for this node
 * @param commandLineOpts parsed command-line options
 * @throws IOException if HA is enabled for this nameservice, or if
 *         initialization fails; partially-created resources are released
 *         via shutdown() before the exception propagates
 */
public SecondaryNameNode(Configuration conf,
    CommandLineOpts commandLineOpts) throws IOException {
  try {
    String nsId = DFSUtil.getSecondaryNameServiceId(conf);
    if (HAUtil.isHAEnabled(conf, nsId)) {
      throw new IOException(
          "Cannot use SecondaryNameNode in an HA cluster."
          + " The Standby Namenode will perform checkpointing.");
    }
    NameNode.initializeGenericKeys(conf, nsId, null);
    initialize(conf, commandLineOpts);
  } catch (IOException | HadoopIllegalArgumentException e) {
    // Multi-catch with precise rethrow replaces two identical
    // shutdown-and-rethrow catch blocks; behavior is unchanged.
    shutdown();
    throw e;
  }
}
// Runs one checkpoint on the wrapped SecondaryNameNode ("snn") and captures
// any failure — including Errors — into the "thrown" field (both declared in
// the enclosing class, outside this fragment) so the owning thread/test can
// inspect the failure later instead of losing it on this worker thread.
@Override public void run() { try { snn.doCheckpoint(); } catch (Throwable t) { thrown = t; } }
// NOTE(review): truncated fragment of the startup-command switch. The
// System.err.println concatenation is cut off mid-expression and runs into
// the next statement, so this line is not compilable as-is. Intent appears
// to be: CHECKPOINT forces a checkpoint when the uncheckpointed transaction
// count exceeds the configured threshold (or -force was given), and another
// command reports the uncheckpointed count — confirm against the full file.
switch (opts.getCommand()) { case CHECKPOINT: long count = countUncheckpointedTxns(); if (count > checkpointConf.getTxnCount() || opts.shouldForceCheckpoint()) { doCheckpoint(); } else { System.err.println("EditLog size " + count + " transactions is " + long uncheckpointed = countUncheckpointedTxns(); System.out.println("NameNode has " + uncheckpointed + " uncheckpointed transactions");
// Test fragment (collapsed from multiple lines — the inline "//" comments
// would otherwise swallow the rest of the line): starts a SecondaryNameNode,
// runs one checkpoint, and shuts everything down in finally.
// NOTE(review): the empty "catch (IOException e) { }" silently swallows the
// checkpoint failure — the test would pass even if doCheckpoint() threw.
// Consider fail(e) or rethrowing; verify against the full test method.
sn = new SecondaryNameNode(config); assertNotNull(sn); sn.doCheckpoint(); // this shouldn't fail LOG.info("--done checkpoint"); } catch (IOException e) { } finally { if(sn!=null) sn.shutdown(); if(cluster!=null) cluster.shutdown();
// Test fragment (collapsed — inline "//" comments break the single-line
// form): first doCheckpoint() is expected to throw IOException; the catch
// block then simulates a secondary-namenode crash via shutdown(), and a
// later doCheckpoint() is expected to succeed after recovery. The calls
// after the catch presumably run on a re-created secondary — confirm
// against the full test method.
secondary.doCheckpoint(); // this should fail fail("Did not get expected exception"); } catch (IOException e) { secondary.shutdown(); // secondary namenode crash! secondary.doCheckpoint(); // this should work correctly secondary.shutdown();
// Test fragment (collapsed — inline "//" comments break the single-line
// form): checkpoints once, logs the edits file ("edf", declared outside this
// fragment), then verifies the checkpoint image/edits directory sizes via
// verifyDifferentDirs against the image file "imf". Cleanup in finally.
sn = new SecondaryNameNode(config); assertNotNull(sn); sn.doCheckpoint(); // this shouldn't fail LOG.info("--done checkpoint"); LOG.info("--edits file " + edf.getAbsolutePath() + "; len = " + edf.length()); FSImage chkpImage = sn.getFSImage(); verifyDifferentDirs(chkpImage, imf.length(), edf.length()); } finally { if(sn!=null) sn.shutdown(); if(cluster!=null) cluster.shutdown();
// Fault-injection test fragment (unbalanced braces — mid-method cut): a
// checkpoint is expected to fail via injected fault (hence fail(...) if no
// IOException), then the catch block inspects leftover temporary edits files
// in the EDITS storage directories before retrying doCheckpoint(). The
// "0l" file-length literal would be clearer as "0L" — cosmetic only.
secondary = startSecondaryNameNode(conf); DFSTestUtil.createFile(fs, new Path("tmpfile0"), 1024, (short) 1, 0l); secondary.doCheckpoint(); secondary.doCheckpoint(); fail("Fault injection failed."); } catch (IOException ioe) { for (StorageDirectory sd : secondary.getFSImage().getStorage() .dirIterable(NameNodeDirType.EDITS)) { File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter); secondary.doCheckpoint(); } finally { if (secondary != null) { secondary.shutdown();
// Snapshot test fragment: after a checkpoint the secondary's SnapshotManager
// mirrors the NameNode's state (1 snapshot / 1 snapshottable dir), and after
// a second checkpoint it reflects their removal (0 / 0). The snapshot
// creation/deletion steps between the two doCheckpoint() calls are not
// visible in this fragment — presumably elided; confirm in the full test.
cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); secondary = new SecondaryNameNode(conf); SnapshotManager nnSnapshotManager = cluster.getNamesystem().getSnapshotManager(); SnapshotManager secondarySnapshotManager = secondary.getFSNamesystem().getSnapshotManager(); secondary.doCheckpoint(); assertEquals(1, secondarySnapshotManager.getNumSnapshots()); assertEquals(1, secondarySnapshotManager.getNumSnapshottableDirs()); secondary.doCheckpoint(); assertEquals(0, secondarySnapshotManager.getNumSnapshots()); assertEquals(0, secondarySnapshotManager.getNumSnapshottableDirs()); secondary.shutdown();
/**
 * Start a SecondaryNameNode whose HTTP server binds an ephemeral port on
 * all interfaces, so concurrently running tests never collide on a fixed
 * port.
 *
 * @param conf configuration to start the node with; its secondary HTTP
 *             address key is overwritten in place
 * @return the newly constructed SecondaryNameNode
 * @throws IOException if the node fails to start
 */
@SuppressWarnings("deprecation")
SecondaryNameNode startSecondaryNameNode(Configuration conf)
    throws IOException {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      "0.0.0.0:0");
  return new SecondaryNameNode(conf);
}
/** * Check whether the secondary name-node can be started. */ @SuppressWarnings("deprecation") private boolean canStartSecondaryNode(Configuration conf) throws IOException { // Using full name allows us not to have to add deprecation tag to // entire source file. org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode sn = null; try { sn = new org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode(conf); } catch(IOException e) { if (e instanceof java.net.BindException) return false; throw e; } finally { if(sn != null) sn.shutdown(); } return true; }
// Checkpoint sequence fragment (collapsed — the inline "//" comments would
// otherwise swallow the trailing statements): begin a checkpoint, fetch the
// fsimage and edits from the NameNode, merge them locally, then upload the
// merged image back.
startCheckpoint(); downloadCheckpointFiles(sig); // Fetch fsimage and edits doMerge(sig); // Do the merge putFSImage(sig);
// Periodic-checkpoint trigger fragment: checkpoint when either the
// uncheckpointed-transaction count threshold is hit or the configured period
// (seconds, hence * 1000 to ms) has elapsed since the last checkpoint.
// "monotonicNow" is not defined in this fragment — presumably captured just
// above via Time.monotonicNow() so the elapsed-time check is immune to
// wall-clock jumps, while "now" records wall-clock time for reporting.
// TODO(review): confirm against the full method.
final long now = Time.now(); if (shouldCheckpointBasedOnCount() || monotonicNow >= lastCheckpointTime + 1000 * checkpointConf.getPeriod()) { doCheckpoint(); lastCheckpointTime = monotonicNow; lastCheckpointWallclockTime = now;
// Download/merge fragment (cut mid-catch): decides whether a full image
// reload is needed before merging. The single "|" (not "||") forces
// hasMergeError() to be evaluated even when downloadCheckpointFiles already
// returned true — this looks deliberate (both conditions must be computed),
// but verify before "fixing" it to a short-circuit OR.
loadImage |= downloadCheckpointFiles( fsName, checkpointImage, sig, manifest) | checkpointImage.hasMergeError(); try { doMerge(sig, manifest, loadImage, checkpointImage, namesystem); } catch (IOException ioe) {
/** * main() has some simple utility methods. * @param argv Command line parameters. * @exception Exception if the filesystem does not exist. */ public static void main(String[] argv) throws Exception { StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG); Configuration tconf = new Configuration(); try { argv = DFSUtil.setGenericConf(argv, tconf); } catch (IllegalArgumentException e) { System.err.println(e.getMessage()); printUsage(""); return; } if (argv.length >= 1) { SecondaryNameNode secondary = new SecondaryNameNode(tconf); int ret = secondary.processArgs(argv); System.exit(ret); } // Create a never ending deamon Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); checkpointThread.start(); }
// Storage-lock test fragment (unbalanced braces, mid-method cut): starting a
// SecondaryNameNode is expected to fail with IOException when the storage
// directory cannot be locked, leaving the reference null (asserted in each
// catch). NOTE(review): the trailing secondary.shutdown() calls are
// duplicated and interleaved with the secondaryDirs cleanup loops — this
// looks like a bad splice of the teardown code; reconcile with the full
// test before changing behavior.
try { secondary = startSecondaryNameNode(conf); assertFalse(secondary.getFSImage().isLockSupported(0)); secondary.shutdown(); } catch (IOException e) { // expected to fail assertTrue(secondary == null); try { secondary2 = startSecondaryNameNode(conf); assertFalse(secondary2.getFSImage().isLockSupported(0)); secondary2.shutdown(); } catch (IOException e) { // expected to fail assertTrue(secondary2 == null); secondary.shutdown(); secondary.shutdown(); for(URI uri : secondaryDirs) { File dir = new File(uri.getPath()); secondary.shutdown(); for(URI uri : secondaryDirs) { File dir = new File(uri.getPath());
/** * main() has some simple utility methods. * @param argv Command line parameters. * @exception Exception if the filesystem does not exist. */ public static void main(String[] argv) throws Exception { StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG); Configuration tconf = new Configuration(); if (argv.length >= 1) { SecondaryNameNode secondary = new SecondaryNameNode(tconf); int ret = secondary.processArgs(argv); System.exit(ret); } // Create a never ending deamon Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); checkpointThread.start(); }
// Initialization fragment: resolves the configured HTTP bind address,
// installs the configuration into UserGroupInformation (for security/auth
// setup), records the active NameNode's info-server address, and resolves
// the checkpoint directories, defaulting to "/tmp/hadoop/dfs/namesecondary"
// when unconfigured. infoBindAddress is computed but not used in this
// fragment — presumably consumed by code below the visible cut.
final InetSocketAddress infoSocAddr = getHttpAddress(conf); final String infoBindAddress = infoSocAddr.getHostName(); UserGroupInformation.setConfiguration(conf); fsName = getInfoServer(); checkpointDirs = FSImage.getCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary");
/**
 * Tear down the given service instance by shutting the
 * SecondaryNameNode down.
 *
 * @param service the node under test to stop
 * @throws Exception if shutdown fails
 */
@Override
protected void doDelete(SecondaryNameNode service) throws Exception {
  service.shutdown();
}
// Mockito test fragment: wraps secondary1's NamenodeProtocol in a delegating
// spy, runs two checkpoints through secondary2 (asserting the most recent
// checkpoint txid reached 6), then restores the original protocol on
// secondary1 and checkpoints through it. The spy's stubbing/verification
// steps are not visible in this fragment — presumably configured between
// these statements in the full test; confirm before editing.
final NamenodeProtocol origNN = secondary1.getNameNode(); final Answer<Object> delegator = new GenericTestUtils.DelegateAnswer(origNN); NamenodeProtocol spyNN = Mockito.mock(NamenodeProtocol.class, delegator); secondary1.setNameNode(spyNN); secondary2.doCheckpoint(); secondary2.doCheckpoint(); assertEquals(6, storage.getMostRecentCheckpointTxId()); secondary1.setNameNode(origNN); secondary1.doCheckpoint();