/**
 * Registers a shutdown hook with the given priority; hooks with a higher
 * priority run earlier, and hooks sharing a priority run in a
 * non-deterministic order.
 *
 * @param shutdownHook the {@code Runnable} to run at JVM shutdown
 * @param priority priority of the shutdownHook; must be non-negative
 * @throws IllegalArgumentException if {@code priority} is negative
 */
public static void addShutdownHook(Runnable shutdownHook, int priority) {
  if (priority >= 0) {
    MGR.addShutdownHook(shutdownHook, priority);
  } else {
    throw new IllegalArgumentException("Priority should be greater than or equal to zero");
  }
}
/**
 * Registers this service for shutdown with Hadoop's
 * {@link ShutdownHookManager}, replacing any prior registration.
 *
 * @param priority shutdown hook priority
 */
public synchronized void register(int priority) {
  // Drop any earlier registration first so this hook is installed at most once.
  unregister();
  ShutdownHookManager.get().addShutdownHook(this, priority);
}
/** * Adds a shutdown hook that tries to call {@link Closeable#close()} on the given argument * at JVM shutdown. This integrates with Hadoop's {@link ShutdownHookManager} in order to * better interact with Spark's usage of the same. * * @param closeable thing to close */ public static void closeAtShutdown(Closeable closeable) { if (SHUTDOWN_HOOK.addCloseable(closeable)) { try { // Spark uses SHUTDOWN_HOOK_PRIORITY + 30; this tries to execute earlier ShutdownHookManager.get().addShutdownHook(SHUTDOWN_HOOK, FileSystem.SHUTDOWN_HOOK_PRIORITY + 40); } catch (IllegalStateException ise) { log.warn("Can't close {} at shutdown since shutdown is in progress", closeable); } } }
// Register clientFinalizer as a JVM shutdown hook at SHUTDOWN_HOOK_PRIORITY.
// NOTE(review): presumably clientFinalizer closes cached client/FileSystem state — confirm at its definition.
ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY);
// Register the FINALIZER runnable as a JVM shutdown hook at SHUTDOWN_HOOK_PRIORITY.
// NOTE(review): FINALIZER's semantics are defined elsewhere in this file — verify what it releases.
ShutdownHookManager.get().addShutdownHook(FINALIZER, SHUTDOWN_HOOK_PRIORITY);
// NOTE(review): fragment — registers an anonymous Runnable shutdown hook; the hook body
// and priority argument continue beyond this excerpt, so no behavior claim is made here.
ShutdownHookManager.get().addShutdownHook( new Runnable() { @Override
/**
 * Logs a STARTUP_MSG banner for the given server class and registers a
 * shutdown hook that logs a matching SHUTDOWN_MSG banner at JVM exit.
 *
 * @param clazz the class of the server
 * @param args arguments
 * @param LOG the target log object
 */
private static void startupShutdownMessage(Class<?> clazz, String[] args,
    final org.slf4j.Logger LOG) {
  final String hostname = getHostname();
  final String classname = clazz.getSimpleName();
  final String[] startupLines = new String[] {
      "Starting " + classname,
      " host = " + hostname,
      " args = " + Arrays.asList(args),
      " version = " + MetastoreVersionInfo.getVersion(),
      " classpath = " + System.getProperty("java.class.path"),
      " build = " + MetastoreVersionInfo.getUrl() + " -r "
          + MetastoreVersionInfo.getRevision() + "; compiled by '"
          + MetastoreVersionInfo.getUser() + "' on " + MetastoreVersionInfo.getDate()};
  LOG.info(toStartupShutdownString("STARTUP_MSG: ", startupLines));
  // Priority 0 is the minimum, so the shutdown banner is logged after higher-priority hooks.
  shutdownHookMgr.addShutdownHook(
      () -> LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ",
          new String[]{"Shutting down " + classname + " at " + hostname})),
      0);
}
/**
 * Creates, configures and starts a {@link JobHistoryServer}, registering a
 * shutdown hook so its composite services stop cleanly on JVM exit.
 * Terminates the JVM on any startup failure.
 *
 * @param args command-line arguments, applied via {@code GenericOptionsParser}
 * @return the started server, or {@code null} if startup failed before construction
 */
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer server = null;
  try {
    server = new JobHistoryServer();
    // Stop the composite service's children in order at JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(server), SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    // Parsed for its side effects on conf (generic Hadoop options).
    new GenericOptionsParser(conf, args);
    server.init(conf);
    server.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return server;
}
// NOTE(review): fragment — registers a lambda shutdown hook that logs the metastore
// shutdown message; the lambda body and priority argument continue beyond this excerpt.
shutdownHookMgr.addShutdownHook(() -> { String shutdownMsg = "Shutting down hive metastore."; HMSHandler.LOG.info(shutdownMsg);
/**
 * Logs a startup banner for the given class, registers UNIX signal loggers
 * where the platform supports them, and installs a shutdown hook that logs a
 * SHUTDOWN_MSG banner at JVM exit.
 *
 * @param clazz the class being started
 * @param args command-line arguments to include in the banner
 * @param LOG the target log adapter
 */
static void startupShutdownMessage(Class<?> clazz, String[] args, final LogAdapter LOG) {
  final String hostname = NetUtils.getHostname();
  final String classname = clazz.getSimpleName();
  LOG.info(createStartupShutdownMessage(classname, hostname, args));
  if (SystemUtils.IS_OS_UNIX) {
    try {
      SignalLogger.INSTANCE.register(LOG);
    } catch (Throwable t) {
      // Signal logging is best-effort; never fail startup because of it.
      LOG.warn("failed to register any UNIX signal loggers: ", t);
    }
  }
  ShutdownHookManager.get().addShutdownHook(
      () -> LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ",
          new String[]{"Shutting down " + classname + " at " + hostname})),
      SHUTDOWN_HOOK_PRIORITY);
}
/**
 * Configures and starts HTrace span collection: builds a Configuration with the
 * given span receivers and ZooKeeper/host/service settings, sets the process id,
 * installs a shutdown hook to disable tracing, and loads the span receivers.
 *
 * @param hostname trace host name to record, or null to skip
 * @param service trace service name (also used as the HTrace process id), or null to skip
 * @param spanReceivers comma-style receiver list stored under TRACE_SPAN_RECEIVERS
 * @param zookeepers ZooKeeper quorum for the tracer
 * @param timeout ZooKeeper timeout; truncated to int below
 * @param zkPath ZooKeeper path for the tracer
 * @param properties extra receiver properties, keyed with the TRACE_SPAN_RECEIVER_PREFIX
 */
private static void enableTracing(String hostname, String service, String spanReceivers,
    String zookeepers, long timeout, String zkPath, Map<String,String> properties) {
  Configuration conf = new Configuration(false);
  conf.set(Property.TRACE_SPAN_RECEIVERS.toString(), spanReceivers);
  // remaining properties will be parsed through an HTraceConfiguration by SpanReceivers
  setProperty(conf, TRACER_ZK_HOST, zookeepers);
  setProperty(conf, TRACER_ZK_TIMEOUT, (int) timeout);
  setProperty(conf, TRACER_ZK_PATH, zkPath);
  for (Entry<String,String> property : properties.entrySet()) {
    // Strip the receiver prefix so receivers see their bare property names.
    setProperty(conf,
        property.getKey().substring(Property.TRACE_SPAN_RECEIVER_PREFIX.getKey().length()),
        property.getValue());
  }
  if (hostname != null) {
    setProperty(conf, TRACE_HOST_PROPERTY, hostname);
  }
  if (service != null) {
    setProperty(conf, TRACE_SERVICE_PROPERTY, service);
  }
  org.apache.htrace.Trace.setProcessId(service);
  // Priority 0 (the minimum): turn tracing off and close receivers after other hooks run.
  ShutdownHookManager.get().addShutdownHook(() -> {
    Trace.off();
    closeReceivers();
  }, 0);
  loadSpanReceivers(conf);
}
// Register the previously-built shutdownHook runnable at SHUTDOWN_HOOK_PRIORITY.
// NOTE(review): shutdownHook's construction is outside this excerpt — confirm what it tears down.
ShutdownHookManager.get().addShutdownHook(shutdownHook, SHUTDOWN_HOOK_PRIORITY);
/**
 * Registers a shutdown hook with the given priority. Higher priorities run
 * earlier; hooks sharing a priority run in a non-deterministic order.
 *
 * @param shutdownHook the {@code Runnable} to execute at JVM shutdown
 * @param priority non-negative priority of the shutdownHook
 * @throws IllegalArgumentException if {@code priority} is negative
 */
public static void addShutdownHook(Runnable shutdownHook, int priority) {
  if (priority >= 0) {
    MGR.addShutdownHook(shutdownHook, priority);
  } else {
    throw new IllegalArgumentException("Priority should be greater than or equal to zero");
  }
}
/**
 * Adds a shutdown hook that runs before the FileSystem cache shutdown happens.
 *
 * @param hook code to execute during shutdown
 * @param priority offset over {@code FileSystem.SHUTDOWN_HOOK_PRIORITY}; a positive
 *                 offset makes this hook run before the FileSystem cache hook
 */
public static void addShutdownHookWithPriority(Runnable hook, int priority) {
  int effectivePriority = FileSystem.SHUTDOWN_HOOK_PRIORITY + priority;
  ShutdownHookManager.get().addShutdownHook(hook, effectivePriority);
}
}
/**
 * Installs this instance as a Hadoop shutdown hook at the given priority,
 * replacing any previously registered hook.
 *
 * @param priority shutdown hook priority
 */
public void register(int priority) {
  // Clear any stale registration so at most one hook is ever installed.
  unregister();
  hook = this;
  ShutdownHookManager.get().addShutdownHook(hook, priority);
}
/**
 * Registers this object as a shutdown hook with {@code ShutdownHookManager},
 * first removing any existing registration.
 *
 * @param priority shutdown hook priority
 */
public void register(int priority) {
  unregister(); // ensure a single active registration
  hook = this;
  ShutdownHookManager.get().addShutdownHook(hook, priority);
}
public void start(boolean register) { startTCPServer(); // Start TCP server if (register) { ShutdownHookManager.get().addShutdownHook(new NfsShutdownHook(), SHUTDOWN_HOOK_PRIORITY); try { rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort); } catch (Throwable e) { LOG.fatal("Failed to register the NFSv3 service.", e); terminate(1, e); } } }
/**
 * Start proxy server.
 *
 * @param configuration configuration to initialize the server with
 * @return proxy server instance.
 * @throws Exception if initialization or startup fails
 */
protected static WebAppProxyServer startServer(Configuration configuration)
    throws Exception {
  final WebAppProxyServer proxy = new WebAppProxyServer();
  // Stop the composite service's children cleanly at JVM shutdown.
  ShutdownHookManager.get().addShutdownHook(
      new CompositeServiceShutdownHook(proxy), SHUTDOWN_HOOK_PRIORITY);
  proxy.init(configuration);
  proxy.start();
  return proxy;
}
/**
 * Builds, initializes and starts a {@link WebAppProxyServer}, registering a
 * shutdown hook so it is stopped at JVM exit.
 *
 * @param configuration server configuration
 * @return proxy server instance.
 * @throws Exception if init or start fails
 */
protected static WebAppProxyServer startServer(Configuration configuration)
    throws Exception {
  WebAppProxyServer proxy = new WebAppProxyServer();
  ShutdownHookManager.get().addShutdownHook(
      new CompositeServiceShutdownHook(proxy), SHUTDOWN_HOOK_PRIORITY);
  proxy.init(configuration);
  proxy.start();
  return proxy;
}
@Override protected Injector doInit(TwillContext context) { name = context.getSpecification().getName(); injector = createGuiceInjector(getCConfiguration(), getConfiguration(), context); // Register shutdown hook to stop Log Saver before Hadoop Filesystem shuts down ShutdownHookManager.get().addShutdownHook(new Runnable() { @Override public void run() { LOG.info("Shutdown hook triggered."); stop(); } }, FileSystem.SHUTDOWN_HOOK_PRIORITY + 1); return injector; }