public static void init() {
  enabled = KylinConfig.getInstanceFromEnv().getQueryMetricsEnabled();
  if (!enabled)
    return;

  DefaultMetricsSystem.initialize("Kylin");
}
@Override
protected void serviceStart() throws Exception {
  DefaultMetricsSystem.initialize("JobHistoryServer");
  JvmMetrics.initSingleton("JobHistoryServer", null);
  super.serviceStart();
  deleteLogManager = new Thread(new deleteLogMonitor());
  deleteLogManager.setName("Log-delete-monitor");
  deleteLogManager.setDaemon(true);
  deleteLogManager.start();
}
synchronized void init(String name) {
  if (inited) {
    return;
  }
  inited = true;
  DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
  JvmMetrics.initSingleton(name, "");
  // initialize hbase-metrics module based metric system as well. GlobalMetricRegistriesSource
  // initialization depends on the metric system being already initialized, that is why we are
  // doing it here. Once BaseSourceSourceImpl is removed, we should do the initialization of
  // these elsewhere.
  GlobalMetricRegistriesAdapter.init();
}
public Metrics2Reporter(MetricRegistry registry, KylinConfig conf) {
  this.metricRegistry = registry;
  this.conf = conf;
  String applicationName = "kylin";

  reporter = HadoopMetrics2Reporter.forRegistry(metricRegistry)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .build(DefaultMetricsSystem.initialize(applicationName), // The application-level name
          applicationName, // Component name
          applicationName, // Component description
          "General"); // Name for each metric record
}
public Metrics2Reporter(MetricRegistry registry, HiveConf conf) {
  this.metricRegistry = registry;
  this.conf = conf;
  String applicationName = conf.get(HiveConf.ConfVars.HIVE_METRICS_HADOOP2_COMPONENT_NAME.varname);

  reporter = HadoopMetrics2Reporter.forRegistry(metricRegistry)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .build(DefaultMetricsSystem.initialize(applicationName), // The application-level name
          applicationName, // Component name
          applicationName, // Component description
          "General"); // Name for each metric record
}
    .convertRatesTo(TimeUnit.SECONDS)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .build(DefaultMetricsSystem.initialize(applicationName),
        applicationName,
        "Runtime metadata catalog",
        "General");
reporter.start(1, TimeUnit.MINUTES);
private GlobalMetricRegistriesAdapter() {
  DefaultMetricsSystem.initialize("Phoenix");
  JvmMetrics.initSingleton("Phoenix", "");
}
public static void main(String[] args) throws Exception {
  final MetricRegistry metrics = new MetricRegistry();
  final HadoopMetrics2Reporter metrics2Reporter = HadoopMetrics2Reporter.forRegistry(metrics)
      .build(DefaultMetricsSystem.initialize("StandaloneTest"), // The application-level name
          "Test", // Component name
          "Test", // Component description
          "Test"); // Name for each metric record
  final ConsoleReporter consoleReporter = ConsoleReporter.forRegistry(metrics).build();
  MetricsSystem metrics2 = DefaultMetricsSystem.instance();

  // Writes to stdout without a filename configuration
  // Will be invoked every 10 seconds by default
  FileSink sink = new FileSink();
  metrics2.register("filesink", "filesink", sink);
  sink.init(new SubsetConfiguration(null, null) {
    public String getString(String key) {
      if (key.equals("filename")) {
        return null;
      }
      return super.getString(key);
    }
  });

  // How often the Dropwizard reporter should be invoked
  metrics2Reporter.start(500, TimeUnit.MILLISECONDS);
  // How often the Dropwizard metrics will be logged to the console
  consoleReporter.start(2, TimeUnit.SECONDS);

  generateMetrics(metrics, 5000, 25, TimeUnit.MILLISECONDS, metrics2Reporter, 10);
}
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories, where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<StorageLocation> dataDirs,
    Configuration conf, SecureResources resources) throws IOException {
  List<StorageLocation> locations;
  StorageLocationChecker storageLocationChecker =
      new StorageLocationChecker(conf, new Timer());
  try {
    locations = storageLocationChecker.check(conf, dataDirs);
  } catch (InterruptedException ie) {
    throw new IOException("Failed to instantiate DataNode", ie);
  }
  DefaultMetricsSystem.initialize("DataNode");
  assert locations.size() > 0 : "number of data directories should be > 0";
  return new DataNode(conf, locations, storageLocationChecker, resources);
}
validateAndCreateJournalDir(journalDir);
DefaultMetricsSystem.initialize("JournalNode");
JvmMetrics.create("JournalNode",
    conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
case CHECKPOINT:
  NamenodeRole role = startOpt.toNodeRole();
  DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
  return new BackupNode(conf, role);
case RECOVER:
  return null; // avoid javac warning
case UPGRADEONLY:
  DefaultMetricsSystem.initialize("NameNode");
  new NameNode(conf);
  terminate(0);
  return null;
default:
  DefaultMetricsSystem.initialize("NameNode");
  return new NameNode(conf);
DefaultMetricsSystem.initialize("SecondaryNameNode"); JvmMetrics.create("SecondaryNameNode", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
public static LocalJobRunnerMetrics create() {
  MetricsSystem ms = DefaultMetricsSystem.initialize("JobTracker");
  return ms.register("LocalJobRunnerMetrics-" + ThreadLocalRandom.current().nextInt(),
      null, new LocalJobRunnerMetrics());
}
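The example above registers a custom metrics source with the MetricsSystem returned by DefaultMetricsSystem.initialize(). A minimal, self-contained sketch of the same pattern follows; the class and metric names (MyAppMetrics, requestsHandled, the "MyApp" prefix) are illustrative and not taken from any of the projects shown here.

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

// Hypothetical annotated source, used only to illustrate the initialize-then-register pattern.
@Metrics(about = "Example application metrics", context = "myapp")
public class MyAppMetrics {

  // Instantiated by the metrics system when this annotated source is registered.
  @Metric("Number of requests handled")
  MutableCounterLong requestsHandled;

  public static MyAppMetrics create() {
    // Initialize the default metrics system once for the process, then register this source.
    MetricsSystem ms = DefaultMetricsSystem.initialize("MyApp");
    return ms.register("MyAppMetrics", "Example application metrics", new MyAppMetrics());
  }

  public void incrRequestsHandled() {
    requestsHandled.incr();
  }
}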
@Override
protected void serviceStart() throws Exception {
  DefaultMetricsSystem.initialize("JobHistoryServer");
  JvmMetrics.initSingleton("JobHistoryServer", null);
  super.serviceStart();
}
public void start() throws IOException {
  httpServer.start();

  DefaultMetricsSystem.initialize(processName);
  final JvmMetrics jm = JvmMetrics.initSingleton(processName, sessionId);
  jm.setPauseMonitor(pauseMonitor);
  pauseMonitor.start();
}
@Before
public void setup() {
  DefaultMetricsSystem.initialize("ResourceManager");
  metrics = ClusterMetrics.getMetrics();
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
  Configuration config = new YarnConfiguration(conf);
  doSecureLogin(conf);
  proxy = new WebAppProxy();
  addService(proxy);
  DefaultMetricsSystem.initialize("WebAppProxyServer");
  JvmMetrics jm = JvmMetrics.initSingleton("WebAppProxyServer", null);
  pauseMonitor = new JvmPauseMonitor(conf);
  jm.setPauseMonitor(pauseMonitor);
  super.serviceInit(config);
}
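Several of the daemons above follow the same startup pattern: DefaultMetricsSystem.initialize() with the process name, JvmMetrics.initSingleton(), and a JvmPauseMonitor attached to the JVM metrics source. Below is a hedged sketch of the matching teardown, assuming the same pauseMonitor field as in the serviceInit above and that the monitor was started during service startup; it is illustrative and not taken from the WebAppProxyServer source.

@Override
protected void serviceStop() throws Exception {
  // Stop the pause monitor created in serviceInit, if it was started.
  if (pauseMonitor != null) {
    pauseMonitor.stop();
  }
  // Tear down the process-wide metrics system initialized in serviceInit.
  DefaultMetricsSystem.shutdown();
  super.serviceStop();
}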