/**
 * Removes a shutdownHook.
 *
 * @param shutdownHook shutdownHook to remove.
 * @return TRUE if the shutdownHook was registered and removed,
 * FALSE otherwise (including when shutdownHook == null)
 */
public static boolean removeShutdownHook(Runnable shutdownHook) {
  if (shutdownHook == null) {
    return false;
  }
  return MGR.removeShutdownHook(shutdownHook);
}
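A minimal usage sketch of this API, assuming the org.apache.hadoop.util.ShutdownHookManager singleton shown above; the priority value (10) and the CleanupTask class are illustrative only and not taken from any of the projects quoted here.

import org.apache.hadoop.util.ShutdownHookManager;

// Sketch: register a hook at a chosen priority, then remove it once the
// owning component has shut down cleanly. Priority and class name are
// illustrative assumptions.
public class CleanupTask implements Runnable {
  @Override
  public void run() {
    // release resources here
  }

  public static void main(String[] args) {
    Runnable hook = new CleanupTask();
    ShutdownHookManager.get().addShutdownHook(hook, 10);

    // ... normal work ...

    // returns true only if the hook was still registered
    boolean removed = ShutdownHookManager.get().removeShutdownHook(hook);
    System.out.println("hook removed: " + removed);
  }
}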
/**
 * Unregister the hook.
 */
public synchronized void unregister() {
  try {
    ShutdownHookManager.get().removeShutdownHook(this);
  } catch (IllegalStateException e) {
    LOG.info("Failed to unregister shutdown hook: {}", e, e);
  }
}
void shutdown(BlockListAsLongs blocksListToPersist) {
  saveReplicas(blocksListToPersist);
  saveDfsUsed();
  dfsUsedSaved = true;

  // Remove the shutdown hook to avoid any memory leak
  if (shutdownHook != null) {
    ShutdownHookManager.get().removeShutdownHook(shutdownHook);
  }

  if (dfsUsage instanceof CachingGetSpaceUsed) {
    IOUtils.cleanupWithLogger(LOG, ((CachingGetSpaceUsed) dfsUsage));
  }
}
public synchronized void unregister() {
  if (hook != null) {
    try {
      ShutdownHookManager.get().removeShutdownHook(hook);
    } catch (IllegalStateException e) {
      LOG.info("Failed to unregister shutdown hook: {}", e, e);
    }
    hook = null;
  }
}
/**
 * Cleans up all shutdown hooks added by Spark and executes them directly.
 * This is needed so that CDAP standalone does not leak memory through shutdown hooks.
 */
private void cleanupShutdownHooks() {
  // With Hadoop 2, Spark uses the Hadoop ShutdownHookManager
  ShutdownHookManager manager = ShutdownHookManager.get();
  try {
    // Use reflection to get the shutdown hooks
    Method getShutdownHooksInOrder =
        manager.getClass().getDeclaredMethod("getShutdownHooksInOrder");
    if (!Collection.class.isAssignableFrom(getShutdownHooksInOrder.getReturnType())) {
      LOG.warn("Unsupported method {}. Spark shutdown hooks cleanup skipped.",
               getShutdownHooksInOrder);
      return;
    }
    getShutdownHooksInOrder.setAccessible(true);

    // Only handle hooks defined in the same SparkRunnerClassLoader as this
    // SparkProgramRunner class. This covers the case where concurrent Spark jobs
    // are running in the same VM.
    for (Object hookEntry : (Collection<?>) getShutdownHooksInOrder.invoke(manager)) {
      Runnable runnable = getShutdownHookRunnable(hookEntry);
      if (runnable != null && runnable.getClass().getClassLoader() == getClass().getClassLoader()) {
        LOG.debug("Running Spark shutdown hook {}", runnable);
        runnable.run();
        manager.removeShutdownHook(runnable);
      }
    }
  } catch (Exception e) {
    LOG.warn("Failed to cleanup Spark shutdown hooks.", e);
  }
}
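The getShutdownHookRunnable helper referenced above is not shown. A hypothetical sketch of such a helper, assuming the manager's hook entries wrap their Runnable in a field named "hook"; the internal layout differs across Hadoop versions, so the field name is an assumption, not the CDAP implementation.

import java.lang.reflect.Field;

// Hypothetical sketch only: pull the wrapped Runnable out of a hook entry
// object via reflection. The field name "hook" is an assumption; real code
// should tolerate different Hadoop versions and fall back gracefully.
private Runnable getShutdownHookRunnable(Object hookEntry) {
  try {
    Field hookField = hookEntry.getClass().getDeclaredField("hook");
    hookField.setAccessible(true);
    Object wrapped = hookField.get(hookEntry);
    return wrapped instanceof Runnable ? (Runnable) wrapped : null;
  } catch (NoSuchFieldException | IllegalAccessException e) {
    // Unknown entry layout; skip this hook rather than fail the cleanup.
    return null;
  }
}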
private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
  try {
    // Remove the old hook if we are rebooting.
    if (hasToReboot && null != nodeManagerShutdownHook) {
      ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook);
    }
    nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
    ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
        SHUTDOWN_HOOK_PRIORITY);
    // System exit should be called only when NodeManager is instantiated from
    // main() function
    this.shouldExitOnShutdownEvent = true;
    this.init(conf);
    this.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting NodeManager", t);
    System.exit(-1);
  }
}
private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
  try {
    // Fail to start if we're on a Unix-based system but bash is missing.
    // Bash is necessary to launch containers under Unix-based systems.
    if (!Shell.WINDOWS) {
      if (!Shell.checkIsBashSupported()) {
        String message = "Failing NodeManager start since we're on a "
            + "Unix-based system but bash doesn't seem to be available.";
        LOG.error(message);
        throw new YarnRuntimeException(message);
      }
    }

    // Remove the old hook if we are rebooting.
    if (hasToReboot && null != nodeManagerShutdownHook) {
      ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook);
    }
    nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
    ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
        SHUTDOWN_HOOK_PRIORITY);
    // System exit should be called only when NodeManager is instantiated from
    // main() function
    this.shouldExitOnShutdownEvent = true;
    this.init(conf);
    this.start();
  } catch (Throwable t) {
    LOG.error("Error starting NodeManager", t);
    System.exit(-1);
  }
}
private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
  try {
    // Fail to start if we're on a Unix-based system but bash is missing.
    // Bash is necessary to launch containers under Unix-based systems.
    if (!Shell.WINDOWS) {
      if (!Shell.checkIsBashSupported()) {
        String message = "Failing NodeManager start since we're on a "
            + "Unix-based system but bash doesn't seem to be available.";
        LOG.fatal(message);
        throw new YarnRuntimeException(message);
      }
    }

    // Remove the old hook if we are rebooting.
    if (hasToReboot && null != nodeManagerShutdownHook) {
      ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook);
    }
    nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
    ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
        SHUTDOWN_HOOK_PRIORITY);
    // System exit should be called only when NodeManager is instantiated from
    // main() function
    this.shouldExitOnShutdownEvent = true;
    this.init(conf);
    this.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting NodeManager", t);
    System.exit(-1);
  }
}
public static void main(String[] argv) {
  Configuration conf = new YarnConfiguration();
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(Router.class, argv, LOG);
  Router router = new Router();
  try {
    // Remove the old hook if we are rebooting.
    if (null != routerShutdownHook) {
      ShutdownHookManager.get().removeShutdownHook(routerShutdownHook);
    }
    routerShutdownHook = new CompositeServiceShutdownHook(router);
    ShutdownHookManager.get().addShutdownHook(routerShutdownHook,
        SHUTDOWN_HOOK_PRIORITY);
    router.init(conf);
    router.start();
  } catch (Throwable t) {
    LOG.error("Error starting Router", t);
    System.exit(-1);
  }
}
Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(0));

mgr.removeShutdownHook(hook1);
Assert.assertFalse(mgr.hasShutdownHook(hook1));