org.apache.hadoop.metrics2.lib.DefaultMetricsSystem Java Examples
The following examples show how to use
org.apache.hadoop.metrics2.lib.DefaultMetricsSystem.
Each example notes the project and source file it was taken from.
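Most of the examples follow the same lifecycle: call DefaultMetricsSystem.initialize(prefix) once per process (tests call DefaultMetricsSystem.setMiniClusterMode(true) first so repeated initialization does not fail), register one or more metrics sources against DefaultMetricsSystem.instance(), and call DefaultMetricsSystem.shutdown() when the service stops. The minimal sketch below condenses that pattern; it is illustrative only, and the ExampleMetrics class and the "ExampleService" prefix are made-up names, not taken from any of the projects listed.

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.source.JvmMetrics;

public final class ExampleMetricsLifecycle {

  // Hypothetical annotated metrics source. The metrics system injects the
  // MutableCounterLong field when the object is registered.
  @Metrics(about = "Example service metrics", context = "example")
  public static class ExampleMetrics {
    @Metric("Number of requests handled")
    MutableCounterLong requests;

    void incrRequests() {
      requests.incr();
    }
  }

  public static void main(String[] args) {
    // In unit tests, allow the singleton to be initialized more than once:
    // DefaultMetricsSystem.setMiniClusterMode(true);

    // 1. Initialize the process-wide metrics system under a service prefix.
    MetricsSystem ms = DefaultMetricsSystem.initialize("ExampleService");
    JvmMetrics.initSingleton("ExampleService", null);

    // 2. Register a source; register() returns the registered source, and
    //    DefaultMetricsSystem.instance() now returns the same system.
    ExampleMetrics metrics = ms.register("ExampleMetrics",
        "Example service metrics", new ExampleMetrics());
    metrics.incrRequests();

    // 3. Shut the metrics system down when the service stops.
    DefaultMetricsSystem.shutdown();
  }
}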
Example #1
Source File: ApplicationHistoryServer.java From hadoop with Apache License 2.0 | 6 votes |
@Override
protected void serviceInit(Configuration conf) throws Exception {
  // init timeline services first
  timelineStore = createTimelineStore(conf);
  addIfService(timelineStore);
  secretManagerService = createTimelineDelegationTokenSecretManagerService(conf);
  addService(secretManagerService);
  timelineDataManager = createTimelineDataManager(conf);
  addService(timelineDataManager);

  // init generic history service afterwards
  aclsManager = createApplicationACLsManager(conf);
  historyManager = createApplicationHistoryManager(conf);
  ahsClientService = createApplicationHistoryClientService(historyManager);
  addService(ahsClientService);
  addService((Service) historyManager);

  DefaultMetricsSystem.initialize("ApplicationHistoryServer");
  JvmMetrics.initSingleton("ApplicationHistoryServer", null);
  super.serviceInit(conf);
}
Example #2
Source File: ResourceManager.java From hadoop with Apache License 2.0 | 6 votes |
@Override
protected void serviceStop() throws Exception {
  DefaultMetricsSystem.shutdown();
  if (rmContext != null) {
    RMStateStore store = rmContext.getStateStore();
    try {
      store.close();
    } catch (Exception e) {
      LOG.error("Error closing store.", e);
    }
  }
  super.serviceStop();
}
Example #3
Source File: TestRMHA.java From big-c with Apache License 2.0 | 6 votes |
@Before
public void setUp() throws Exception {
  configuration = new Configuration();
  UserGroupInformation.setConfiguration(configuration);
  configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
  for (String confKey : YarnConfiguration
      .getServiceAddressConfKeys(configuration)) {
    configuration.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
    configuration.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
    configuration.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
  }

  // Enable webapp to test web-services also
  configuration.setBoolean(MockRM.ENABLE_WEBAPP, true);
  configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  ClusterMetrics.destroy();
  QueueMetrics.clearQueueMetrics();
  DefaultMetricsSystem.shutdown();
}
Example #4
Source File: JmxCacheBuster.java From hbase with Apache License 2.0 | 6 votes |
@Override
public void run() {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Clearing JMX mbean cache.");
  }

  // This is pretty extreme but it's the best way that
  // I could find to get metrics to be removed.
  try {
    if (DefaultMetricsSystem.instance() != null) {
      DefaultMetricsSystem.instance().stop();
      // Sleep some time so that the rest of the hadoop metrics
      // system knows that things are done
      Thread.sleep(500);
      DefaultMetricsSystem.instance().start();
    }
  } catch (Exception exception) {
    LOG.debug("error clearing the jmx it appears the metrics system hasn't been started",
        exception);
  }
}
Example #5
Source File: FSQueueMetrics.java From hadoop with Apache License 2.0 | 6 votes |
public synchronized static FSQueueMetrics forQueue(String queueName,
    Queue parent, boolean enableUserMetrics, Configuration conf) {
  MetricsSystem ms = DefaultMetricsSystem.instance();
  QueueMetrics metrics = queueMetrics.get(queueName);
  if (metrics == null) {
    metrics = new FSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
        .tag(QUEUE_INFO, queueName);

    // Register with the MetricsSystems
    if (ms != null) {
      metrics = ms.register(
          sourceName(queueName).toString(),
          "Metrics for queue: " + queueName, metrics);
    }
    queueMetrics.put(queueName, metrics);
  }
  return (FSQueueMetrics)metrics;
}
Example #6
Source File: FSQueueMetrics.java From big-c with Apache License 2.0 | 6 votes |
public synchronized static FSQueueMetrics forQueue(String queueName,
    Queue parent, boolean enableUserMetrics, Configuration conf) {
  MetricsSystem ms = DefaultMetricsSystem.instance();
  QueueMetrics metrics = queueMetrics.get(queueName);
  if (metrics == null) {
    metrics = new FSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
        .tag(QUEUE_INFO, queueName);

    // Register with the MetricsSystems
    if (ms != null) {
      metrics = ms.register(
          sourceName(queueName).toString(),
          "Metrics for queue: " + queueName, metrics);
    }
    queueMetrics.put(queueName, metrics);
  }
  return (FSQueueMetrics)metrics;
}
Example #7
Source File: RpcProgramNfs3.java From hadoop with Apache License 2.0 | 5 votes |
public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  DefaultMetricsSystem.initialize("Nfs3");
  String displayName = DNS.getDefaultHost("default", "default")
      + config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
          NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
  metrics = Nfs3Metrics.create(config, displayName);
  return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts);
}
Example #8
Source File: BaseHttpServer.java From hadoop-ozone with Apache License 2.0 | 5 votes |
public void start() throws IOException {
  if (httpServer != null && isEnabled()) {
    httpServer.start();
    if (prometheusSupport) {
      DefaultMetricsSystem.instance()
          .register("prometheus", "Hadoop metrics prometheus exporter",
              prometheusMetricsSink);
    }
    updateConnectorAddress();
  }
}
Example #9
Source File: TestFairScheduler.java From hadoop with Apache License 2.0 | 5 votes |
@After
public void tearDown() {
  if (scheduler != null) {
    scheduler.stop();
    scheduler = null;
  }
  if (resourceManager != null) {
    resourceManager.stop();
    resourceManager = null;
  }
  QueueMetrics.clearQueueMetrics();
  DefaultMetricsSystem.shutdown();
}
Example #10
Source File: StandaloneExample.java From kylin-on-parquet-v2 with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
  final MetricRegistry metrics = new MetricRegistry();
  final HadoopMetrics2Reporter metrics2Reporter = HadoopMetrics2Reporter.forRegistry(metrics).build(
      DefaultMetricsSystem.initialize("StandaloneTest"), // The application-level name
      "Test", // Component name
      "Test", // Component description
      "Test"); // Name for each metric record
  final ConsoleReporter consoleReporter = ConsoleReporter.forRegistry(metrics).build();

  MetricsSystem metrics2 = DefaultMetricsSystem.instance();
  // Writes to stdout without a filename configuration
  // Will be invoked every 10 seconds by default
  FileSink sink = new FileSink();
  metrics2.register("filesink", "filesink", sink);
  sink.init(new SubsetConfiguration(null, null) {
    public String getString(String key) {
      if (key.equals("filename")) {
        return null;
      }
      return super.getString(key);
    }
  });

  // How often should the dropwizard reporter be invoked
  metrics2Reporter.start(500, TimeUnit.MILLISECONDS);
  // How often will the dropwizard metrics be logged to the console
  consoleReporter.start(2, TimeUnit.SECONDS);

  generateMetrics(metrics, 5000, 25, TimeUnit.MILLISECONDS, metrics2Reporter, 10);
}
Example #11
Source File: BaseSourceImpl.java From hbase with Apache License 2.0 | 5 votes |
synchronized void init(String name) {
  if (inited) {
    return;
  }

  inited = true;
  DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
  JvmMetrics.initSingleton(name, "");
  // initialize hbase-metrics module based metric system as well. GlobalMetricRegistriesSource
  // initialization depends on the metric system being already initialized, that is why we are
  // doing it here. Once BaseSourceSourceImpl is removed, we should do the initialization of
  // these elsewhere.
  GlobalMetricRegistriesAdapter.init();
}
Example #12
Source File: NodeManager.java From big-c with Apache License 2.0 | 5 votes |
@Override
protected void serviceStop() throws Exception {
  if (isStopping.getAndSet(true)) {
    return;
  }
  try {
    super.serviceStop();
    DefaultMetricsSystem.shutdown();
  } finally {
    // YARN-3641: NM's services stop get failed shouldn't block the
    // release of NMLevelDBStore.
    stopRecoveryStore();
  }
}
Example #13
Source File: TestMRAppMaster.java From big-c with Apache License 2.0 | 5 votes |
@BeforeClass
public static void setup() throws AccessControlException,
    FileNotFoundException, IllegalArgumentException, IOException {
  // Do not error out if metrics are inited multiple times
  DefaultMetricsSystem.setMiniClusterMode(true);
  File dir = new File(stagingDir);
  stagingDir = dir.getAbsolutePath();
  localFS = FileContext.getLocalFSFileContext();
  localFS.delete(new Path(testDir.getAbsolutePath()), true);
  testDir.mkdir();
}
Example #14
Source File: TestNodeStatusUpdater.java From big-c with Apache License 2.0 | 5 votes |
@After
public void tearDown() {
  this.registeredNodes.clear();
  heartBeatID = 0;
  ServiceOperations.stop(nm);
  assertionFailedInThread.set(false);
  DefaultMetricsSystem.shutdown();
}
Example #15
Source File: SCMPipelineMetrics.java From hadoop-ozone with Apache License 2.0 | 5 votes |
/**
 * Creates and returns the SCMPipelineMetrics instance.
 *
 * @return SCMPipelineMetrics
 */
public static synchronized SCMPipelineMetrics create() {
  if (instance != null) {
    return instance;
  }
  MetricsSystem ms = DefaultMetricsSystem.instance();
  instance = ms.register(SOURCE_NAME, "SCM PipelineManager Metrics",
      new SCMPipelineMetrics());
  return instance;
}
Example #16
Source File: StartupProgressMetrics.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Creates a new StartupProgressMetrics registered with the metrics system.
 *
 * @param startupProgress StartupProgress to link
 */
public StartupProgressMetrics(StartupProgress startupProgress) {
  this.startupProgress = startupProgress;
  DefaultMetricsSystem.instance().register(
      STARTUP_PROGRESS_METRICS_INFO.name(),
      STARTUP_PROGRESS_METRICS_INFO.description(), this);
}
Example #17
Source File: SharedCacheManager.java From big-c with Apache License 2.0 | 5 votes |
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.store = createSCMStoreService(conf);
  addService(store);

  CleanerService cs = createCleanerService(store);
  addService(cs);

  SharedCacheUploaderService nms = createNMCacheUploaderSCMProtocolService(store);
  addService(nms);

  ClientProtocolService cps = createClientProtocolService(store);
  addService(cps);

  SCMAdminProtocolService saps = createSCMAdminProtocolService(cs);
  addService(saps);

  SCMWebServer webUI = createSCMWebServer(this);
  addService(webUI);

  // init metrics
  DefaultMetricsSystem.initialize("SharedCacheManager");
  JvmMetrics.initSingleton("SharedCacheManager", null);

  super.serviceInit(conf);
}
Example #18
Source File: MBeans.java From big-c with Apache License 2.0 | 5 votes |
static private ObjectName getMBeanName(String serviceName, String nameName) {
  ObjectName name = null;
  String nameStr = "Hadoop:service=" + serviceName + ",name=" + nameName;
  try {
    name = DefaultMetricsSystem.newMBeanName(nameStr);
  } catch (Exception e) {
    LOG.warn("Error creating MBean object name: " + nameStr, e);
  }
  return name;
}
Example #19
Source File: RocksDBStoreMBean.java From hadoop-ozone with Apache License 2.0 | 5 votes |
public static RocksDBStoreMBean create(Statistics statistics,
    String contextName) {

  RocksDBStoreMBean rocksDBStoreMBean = new RocksDBStoreMBean(
      statistics, contextName);

  MetricsSystem ms = DefaultMetricsSystem.instance();
  MetricsSource metricsSource = ms.getSource(rocksDBStoreMBean.contextName);
  if (metricsSource != null) {
    return (RocksDBStoreMBean)metricsSource;
  } else {
    return ms.register(rocksDBStoreMBean.contextName, "RocksDB Metrics",
        rocksDBStoreMBean);
  }
}
Example #20
Source File: QueueMetrics.java From hadoop with Apache License 2.0 | 5 votes |
public synchronized static QueueMetrics forQueue(String queueName,
    Queue parent, boolean enableUserMetrics, Configuration conf) {
  return forQueue(DefaultMetricsSystem.instance(), queueName, parent,
      enableUserMetrics, conf);
}
Example #21
Source File: FSOpDurations.java From hadoop with Apache License 2.0 | 5 votes |
private FSOpDurations() {
  registry = new MetricsRegistry(RECORD_INFO);
  registry.tag(RECORD_INFO, "FSOpDurations");

  MetricsSystem ms = DefaultMetricsSystem.instance();
  if (ms != null) {
    ms.register(RECORD_INFO.name(), RECORD_INFO.description(), this);
  }
}
Example #22
Source File: TezChild.java From incubator-tez with Apache License 2.0 | 5 votes |
private void shutdown() {
  executor.shutdownNow();
  if (taskReporter != null) {
    taskReporter.shutdown();
  }
  RPC.stopProxy(umbilical);
  DefaultMetricsSystem.shutdown();
  LogManager.shutdown();
}
Example #23
Source File: TestBalancerStatusTagInJMXMetrics.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Gets the balancer status tag from the Metrics registry
 */
public String getStatus() throws Exception {
  MetricsSource source =
      DefaultMetricsSystem.instance().getSource(MetricsBalancerSource.METRICS_JMX_CONTEXT);
  if (source instanceof MetricsBalancerSourceImpl) {
    MetricsTag status = ((MetricsBalancerSourceImpl) source).getMetricsRegistry()
        .getTag(MetricsBalancerSource.BALANCER_STATUS);
    return status.value();
  } else {
    LOG.warn("Balancer JMX Metrics not registered");
    throw new Exception("MetricsBalancer JMX Context not found");
  }
}
Example #24
Source File: MiniOzoneChaosCluster.java From hadoop-ozone with Apache License 2.0 | 5 votes |
@Override
public MiniOzoneChaosCluster build() throws IOException {
  DefaultMetricsSystem.setMiniClusterMode(true);
  initializeConfiguration();
  if (numOfOMs > 1) {
    initOMRatisConf();
  }

  StorageContainerManager scm;
  List<OzoneManager> omList;
  try {
    scm = createSCM();
    scm.start();
    if (numOfOMs > 1) {
      omList = createOMService();
    } else {
      OzoneManager om = createOM();
      om.start();
      omList = Arrays.asList(om);
    }
  } catch (AuthenticationException ex) {
    throw new IOException("Unable to build MiniOzoneCluster. ", ex);
  }

  final List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(
      scm, null);

  MiniOzoneChaosCluster cluster =
      new MiniOzoneChaosCluster(conf, omList, scm, hddsDatanodes,
          omServiceId, clazzes);

  if (startDataNodes) {
    cluster.startHddsDatanodes();
  }
  return cluster;
}
Example #25
Source File: ClientSCMMetrics.java From big-c with Apache License 2.0 | 5 votes |
static ClientSCMMetrics create() {
  MetricsSystem ms = DefaultMetricsSystem.instance();
  ClientSCMMetrics metrics = new ClientSCMMetrics();
  ms.register("clientRequests", null, metrics);
  return metrics;
}
Example #26
Source File: MiniOzoneClusterImpl.java From hadoop-ozone with Apache License 2.0 | 5 votes |
@Override
public void shutdown() {
  try {
    LOG.info("Shutting down the Mini Ozone Cluster");
    File baseDir = new File(GenericTestUtils.getTempPath(
        MiniOzoneClusterImpl.class.getSimpleName() + "-" +
            scm.getClientProtocolServer().getScmInfo().getClusterId()));
    stop();
    FileUtils.deleteDirectory(baseDir);
    ContainerCache.getInstance(conf).shutdownCache();
    DefaultMetricsSystem.shutdown();
  } catch (IOException e) {
    LOG.error("Exception while shutting down the cluster.", e);
  }
}
Example #27
Source File: TestSecureOzoneCluster.java From hadoop-ozone with Apache License 2.0 | 5 votes |
@Before
public void init() {
  try {
    conf = new OzoneConfiguration();
    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");

    conf.setInt(OZONE_SCM_CLIENT_PORT_KEY,
        getPort(OZONE_SCM_CLIENT_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_DATANODE_PORT_KEY,
        getPort(OZONE_SCM_DATANODE_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
        getPort(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
        getPort(OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));
    DefaultMetricsSystem.setMiniClusterMode(true);
    final String path = folder.newFolder().toString();
    Path metaDirPath = Paths.get(path, "om-meta");
    conf.set(OZONE_METADATA_DIRS, metaDirPath.toString());
    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
    conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());

    workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());

    startMiniKdc();
    setSecureConfig();
    createCredentialsInKDC();
    generateKeyPair();
    // OzoneManager.setTestSecureOmFlag(true);
  } catch (Exception e) {
    LOG.error("Failed to initialize TestSecureOzoneCluster", e);
  }
}
Example #28
Source File: TestMRAppMaster.java From hadoop with Apache License 2.0 | 5 votes |
@BeforeClass
public static void setup() throws AccessControlException,
    FileNotFoundException, IllegalArgumentException, IOException {
  // Do not error out if metrics are inited multiple times
  DefaultMetricsSystem.setMiniClusterMode(true);
  File dir = new File(stagingDir);
  stagingDir = dir.getAbsolutePath();
  localFS = FileContext.getLocalFSFileContext();
  localFS.delete(new Path(testDir.getAbsolutePath()), true);
  testDir.mkdir();
}
Example #29
Source File: ApplicationHistoryServer.java From hadoop with Apache License 2.0 | 5 votes |
@Override
protected void serviceStop() throws Exception {
  if (webApp != null) {
    webApp.stop();
  }

  DefaultMetricsSystem.shutdown();
  super.serviceStop();
}
Example #30
Source File: TestSecureContainerServer.java From hadoop-ozone with Apache License 2.0 | 5 votes |
@BeforeClass
static public void setup() throws Exception {
  DefaultMetricsSystem.setMiniClusterMode(true);
  CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR);
  CONF.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
  CONF.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
  caClient = new CertificateClientTestImpl(CONF);
}