/**
 * Register this service with Hadoop's {@link ShutdownHookManager} so it is
 * invoked during JVM shutdown.
 *
 * @param priority shutdown hook priority
 */
public synchronized void register(int priority) {
  // Drop any earlier registration first so the hook is never installed twice.
  unregister();
  final ShutdownHookManager manager = ShutdownHookManager.get();
  manager.addShutdownHook(this, priority);
}
if (ShutdownHookManager.get().isShutdownInProgress()) { LOG.error("Thread {} threw an error during shutdown: {}.", thread.toString(),
/**
 * Unregister this hook from Hadoop's {@link ShutdownHookManager}.
 * Removal can fail with {@link IllegalStateException} once shutdown has
 * already begun; that case is logged and otherwise ignored.
 */
public synchronized void unregister() {
  final ShutdownHookManager manager = ShutdownHookManager.get();
  try {
    manager.removeShutdownHook(this);
  } catch (IllegalStateException e) {
    // Best effort: too late to remove the hook, just record the fact.
    LOG.info("Failed to unregister shutdown hook: {}", e, e);
  }
}
&& !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY);
private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) { try { // Remove the old hook if we are rebooting. if (hasToReboot && null != nodeManagerShutdownHook) { ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook); } nodeManagerShutdownHook = new CompositeServiceShutdownHook(this); ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook, SHUTDOWN_HOOK_PRIORITY); // System exit should be called only when NodeManager is instantiated from // main() funtion this.shouldExitOnShutdownEvent = true; this.init(conf); this.start(); } catch (Throwable t) { LOG.fatal("Error starting NodeManager", t); System.exit(-1); } }
@Test public void shutdownHookManager() { ShutdownHookManager mgr = ShutdownHookManager.get(); Assert.assertNotNull(mgr); Assert.assertEquals(0, mgr.getShutdownHooksInOrder().size()); Runnable hook1 = new Runnable() { @Override mgr.addShutdownHook(hook1, 0); Assert.assertTrue(mgr.hasShutdownHook(hook1)); Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size()); Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(0)); mgr.removeShutdownHook(hook1); Assert.assertFalse(mgr.hasShutdownHook(hook1)); mgr.addShutdownHook(hook1, 0); Assert.assertTrue(mgr.hasShutdownHook(hook1)); Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size()); Assert.assertTrue(mgr.hasShutdownHook(hook1)); Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size()); mgr.addShutdownHook(hook2, 1); Assert.assertTrue(mgr.hasShutdownHook(hook1)); Assert.assertTrue(mgr.hasShutdownHook(hook2)); Assert.assertEquals(2, mgr.getShutdownHooksInOrder().size()); Assert.assertEquals(hook2, mgr.getShutdownHooksInOrder().get(0)); Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(1));
/**
 * Adds a shutdownHook with a priority; the higher the priority the earlier
 * it will run. ShutdownHooks with the same priority run in a
 * non-deterministic order.
 *
 * @param shutdownHook shutdownHook <code>Runnable</code>
 * @param priority priority of the shutdownHook; must be non-negative
 * @throws IllegalArgumentException if {@code priority} is negative
 */
public static void addShutdownHook(Runnable shutdownHook, int priority) {
  if (priority >= 0) {
    MGR.addShutdownHook(shutdownHook, priority);
  } else {
    throw new IllegalArgumentException("Priority should be greater than or equal to zero");
  }
}
/**
 * Reports whether JVM shutdown is currently in progress, as tracked by the
 * singleton manager.
 *
 * @return TRUE if the shutdown is in progress, otherwise FALSE.
 */
public static boolean isShutdownInProgress() {
  return MGR.isShutdownInProgress();
}
/**
 * Removes a shutdownHook.
 *
 * @param shutdownHook shutdownHook to remove; may be null
 * @return TRUE if the shutdownHook was registered and removed,
 *         FALSE otherwise (including when shutdownHook == null)
 */
public static boolean removeShutdownHook(Runnable shutdownHook) {
  // Null is treated as "not registered" so callers need no guard.
  return shutdownHook != null && MGR.removeShutdownHook(shutdownHook);
}
/**
 * Execute the shutdown sequence: run every registered hook in priority
 * order on the executor, bounding each by its configured timeout.
 * This is exposed purely for testing: do not invoke it.
 *
 * @return the number of shutdown hooks which timed out.
 */
@InterfaceAudience.Private
@VisibleForTesting
static int executeShutdown() {
  int timedOutCount = 0;
  for (HookEntry entry : MGR.getShutdownHooksInOrder()) {
    final Future<?> pending = EXECUTOR.submit(entry.getHook());
    try {
      pending.get(entry.getTimeout(), entry.getTimeUnit());
    } catch (TimeoutException ex) {
      // Hook overran its budget: count it and try to interrupt it.
      timedOutCount++;
      pending.cancel(true);
      LOG.warn("ShutdownHook '" + entry.getHook().getClass().
          getSimpleName() + "' timeout, " + ex.toString(), ex);
    } catch (Throwable ex) {
      // A failing hook must not prevent the remaining hooks from running.
      LOG.warn("ShutdownHook '" + entry.getHook().getClass().
          getSimpleName() + "' failed, " + ex.toString(), ex);
    }
  }
  return timedOutCount;
}
/**
 * Look up the cached FileSystem for {@code key}, creating and caching a new
 * instance on a miss. Uses a check / create-outside-lock / re-check pattern
 * so that createFileSystem(), which may be slow, runs without holding the
 * cache lock; a losing racer closes its instance and returns the winner's.
 *
 * @param uri  URI to create the FileSystem from on a cache miss
 * @param conf configuration used for creation and to read fs.automatic.close
 * @param key  cache key identifying the FileSystem
 * @return the cached or newly created FileSystem
 * @throws IOException if creation (or closing a duplicate) fails
 */
private FileSystem getInternal(URI uri, Configuration conf, Key key) throws IOException{
  FileSystem fs;
  synchronized (this) {
    fs = map.get(key);
  }
  if (fs != null) {
    return fs;
  }
  // Cache miss: create outside the lock (creation may block on I/O).
  fs = createFileSystem(uri, conf);
  synchronized (this) { // refetch the lock again
    FileSystem oldfs = map.get(key);
    if (oldfs != null) { // a file system is created while lock is releasing
      fs.close(); // close the new file system
      return oldfs; // return the old file system
    }

    // now insert the new file system into the map
    // First entry registers the client finalizer hook, unless the JVM is
    // already shutting down (registration would then throw).
    if (map.isEmpty()
            && !ShutdownHookManager.get().isShutdownInProgress()) {
      ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY);
    }
    fs.key = key;
    map.put(key, fs);
    if (conf.getBoolean("fs.automatic.close", true)) {
      toAutoClose.add(key);
    }
    return fs;
  }
}
/**
 * Initialize and start this NodeManager, registering a shutdown hook that
 * stops the composite service on JVM exit. Exits the JVM on any failure.
 *
 * @param conf        configuration to initialize the NodeManager with
 * @param hasToReboot whether this call is a reboot (the previous hook is
 *                    removed first so it is not registered twice)
 */
private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
  try {
    // Remove the old hook if we are rebooting.
    if (hasToReboot && null != nodeManagerShutdownHook) {
      ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook);
    }
    nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
    ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
        SHUTDOWN_HOOK_PRIORITY);
    // System exit should be called only when NodeManager is instantiated from
    // main() function
    this.shouldExitOnShutdownEvent = true;
    this.init(conf);
    this.start();
  } catch (Throwable t) {
    // Any startup failure is fatal: log it and terminate the process.
    LOG.fatal("Error starting NodeManager", t);
    System.exit(-1);
  }
}
@Test public void shutdownHookManager() { ShutdownHookManager mgr = ShutdownHookManager.get(); Assert.assertNotNull(mgr); Assert.assertEquals(0, mgr.getShutdownHooksInOrder().size()); Runnable hook1 = new Runnable() { @Override mgr.addShutdownHook(hook1, 0); Assert.assertTrue(mgr.hasShutdownHook(hook1)); Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size()); Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(0)); mgr.removeShutdownHook(hook1); Assert.assertFalse(mgr.hasShutdownHook(hook1)); mgr.addShutdownHook(hook1, 0); Assert.assertTrue(mgr.hasShutdownHook(hook1)); Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size()); Assert.assertTrue(mgr.hasShutdownHook(hook1)); Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size()); mgr.addShutdownHook(hook2, 1); Assert.assertTrue(mgr.hasShutdownHook(hook1)); Assert.assertTrue(mgr.hasShutdownHook(hook2)); Assert.assertEquals(2, mgr.getShutdownHooksInOrder().size()); Assert.assertEquals(hook2, mgr.getShutdownHooksInOrder().get(0)); Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(1));
/**
 * Log the standard startup banner and register a shutdown hook that logs
 * the matching shutdown banner when the JVM exits.
 *
 * @param clazz the class of the server
 * @param args command-line arguments
 * @param LOG the target log object
 */
private static void startupShutdownMessage(Class<?> clazz, String[] args,
    final org.slf4j.Logger LOG) {
  final String hostname = getHostname();
  final String classname = clazz.getSimpleName();

  final String[] startupDetails = new String[] {
      "Starting " + classname,
      " host = " + hostname,
      " args = " + Arrays.asList(args),
      " version = " + MetastoreVersionInfo.getVersion(),
      " classpath = " + System.getProperty("java.class.path"),
      " build = " + MetastoreVersionInfo.getUrl() + " -r "
          + MetastoreVersionInfo.getRevision() + "; compiled by '"
          + MetastoreVersionInfo.getUser() + "' on "
          + MetastoreVersionInfo.getDate()
  };
  LOG.info(toStartupShutdownString("STARTUP_MSG: ", startupDetails));

  // The shutdown banner is emitted from a hook so it appears at JVM exit.
  shutdownHookMgr.addShutdownHook(
      () -> LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ",
          new String[]{ "Shutting down " + classname + " at " + hostname})),
      0);
}
/**
 * Deregister a file from the delete-on-exit hook.
 *
 * Once JVM shutdown is in progress this is a no-op (with a warning): the
 * hook may already be iterating its targets, so the cancellation can no
 * longer be honored.
 *
 * @param file file previously registered for deletion on exit
 */
public static void cancelDeleteOnExit(File file) {
  if (MGR.isShutdownInProgress()) {
    LOG.warn("Shutdown in progress, cannot cancel a deleteOnExit");
    // Bug fix: the original fell through and removed the target anyway,
    // contradicting the warning it just logged. Return early instead.
    return;
  }
  DELETE_ON_EXIT_HOOK.deleteTargets.remove(file);
}
/**
 * Removes a shutdownHook.
 *
 * @param shutdownHook shutdownHook to remove.
 * @return TRUE if the shutdownHook was registered and removed,
 * FALSE otherwise (including when shutdownHook == null)
 */
public static boolean removeShutdownHook(Runnable shutdownHook) {
  // Null is tolerated so callers need not guard; treated as "not registered".
  if (shutdownHook == null) {
    return false;
  }
  return MGR.removeShutdownHook(shutdownHook);
}
@Override public void run() { MGR.shutdownInProgress.set(true); for (Runnable hook: MGR.getShutdownHooksInOrder()) { try { hook.run(); } catch (Throwable ex) { LOG.warn("ShutdownHook '" + hook.getClass().getSimpleName() + "' failed, " + ex.toString(), ex); } } } }
/** * Adds a shutdown hook that tries to call {@link Closeable#close()} on the given argument * at JVM shutdown. This integrates with Hadoop's {@link ShutdownHookManager} in order to * better interact with Spark's usage of the same. * * @param closeable thing to close */ public static void closeAtShutdown(Closeable closeable) { if (SHUTDOWN_HOOK.addCloseable(closeable)) { try { // Spark uses SHUTDOWN_HOOK_PRIORITY + 30; this tries to execute earlier ShutdownHookManager.get().addShutdownHook(SHUTDOWN_HOOK, FileSystem.SHUTDOWN_HOOK_PRIORITY + 40); } catch (IllegalStateException ise) { log.warn("Can't close {} at shutdown since shutdown is in progress", closeable); } } }
/**
 * @return whether JVM shutdown is currently in progress, as reported by
 *         Hadoop's ShutdownHookManager
 */
protected boolean isShuttingDown() {
  return ShutdownHookManager.get().isShutdownInProgress();
}
private FileSystem getInternal(URI uri, Configuration conf, Key key) throws IOException{ FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } fs = createFileSystem(uri, conf); synchronized (this) { // refetch the lock again FileSystem oldfs = map.get(key); if (oldfs != null) { // a file system is created while lock is releasing fs.close(); // close the new file system return oldfs; // return the old file system } // now insert the new file system into the map if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } fs.key = key; map.put(key, fs); if (conf.getBoolean("fs.automatic.close", true)) { toAutoClose.add(key); } return fs; } }