Java Code Examples for org.apache.hadoop.metrics2.source.JvmMetrics#create()
The following examples show how to use
org.apache.hadoop.metrics2.source.JvmMetrics#create().
Each example notes the original project and source file it was taken from.
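Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: initialize the default metrics system for a process, then register a JvmMetrics source with it. The class name JvmMetricsExample, the process name "MyService", and the null session id are placeholder values chosen for illustration, not part of the Hadoop API.

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;

public class JvmMetricsExample {
  public static void main(String[] args) {
    // Initialize the singleton metrics system, using the process name as its prefix.
    MetricsSystem ms = DefaultMetricsSystem.initialize("MyService");
    // Register a JVM metrics source for this process; the session id may be null.
    JvmMetrics jm = JvmMetrics.create("MyService", null, ms);
    // ... register service-specific metrics sources with ms, as the examples below do ...
  }
}

In the real services below, the session id is typically read from the configuration via DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY.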
Example 1
Source File: JournalNode.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");

  validateAndCreateJournalDir(localDir);

  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY,
      socAddr.getHostName());

  registerJNMXBean();

  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
Example 2
Source File: JournalNode.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");

  validateAndCreateJournalDir(localDir);

  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY,
      socAddr.getHostName());

  registerJNMXBean();

  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
Example 3
Source File: HddsServerUtil.java From hadoop-ozone with Apache License 2.0 | 5 votes |
/**
 * Initialize hadoop metrics system for Ozone servers.
 * @param configuration OzoneConfiguration to use.
 * @param serverName The logical name of the server components.
 */
public static MetricsSystem initializeMetrics(
    OzoneConfiguration configuration, String serverName) {
  MetricsSystem metricsSystem = DefaultMetricsSystem.initialize(serverName);
  try {
    JvmMetrics.create(serverName,
        configuration.get(DFSConfigKeysLegacy.DFS_METRICS_SESSION_ID_KEY),
        DefaultMetricsSystem.instance());
  } catch (MetricsException e) {
    LOG.info("Metrics source JvmMetrics already added to DataNode.");
  }
  return metricsSystem;
}
Example 4
Source File: Nfs3Metrics.java From hadoop with Apache License 2.0 | 5 votes |
public static Nfs3Metrics create(Configuration conf, String gatewayName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);

  // Percentile measurement is [50th,75th,90th,95th,99th] currently
  int[] intervals = conf
      .getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
}
Example 5
Source File: NameNodeMetrics.java From hadoop with Apache License 2.0 | 5 votes |
public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  String processName = r.toString();
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);

  // Percentile measurement is off by default, by watching no intervals
  int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new NameNodeMetrics(processName, sessionId,
      intervals, jm));
}
Example 6
Source File: DataNodeMetrics.java From hadoop with Apache License 2.0 | 5 votes |
public static DataNodeMetrics create(Configuration conf, String dnName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);
  String name = "DataNodeActivity-" + (dnName.isEmpty()
      ? "UndefinedDataNodeName" + DFSUtil.getRandom().nextInt()
      : dnName.replace(':', '-'));

  // Percentile measurement is off by default, by watching no intervals
  int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);

  return ms.register(name, null, new DataNodeMetrics(name, sessionId,
      intervals, jm));
}
Example 7
Source File: ProxyMetrics.java From nnproxy with Apache License 2.0 | 5 votes |
public static ProxyMetrics create(Configuration conf) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  String processName = "NNPROXY";
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);
  return ms.register(new ProxyMetrics(processName, sessionId, jm));
}
Example 8
Source File: Nfs3Metrics.java From big-c with Apache License 2.0 | 5 votes |
public static Nfs3Metrics create(Configuration conf, String gatewayName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);

  // Percentile measurement is [50th,75th,90th,95th,99th] currently
  int[] intervals = conf
      .getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
}
Example 9
Source File: NameNodeMetrics.java From big-c with Apache License 2.0 | 5 votes |
public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  String processName = r.toString();
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);

  // Percentile measurement is off by default, by watching no intervals
  int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new NameNodeMetrics(processName, sessionId,
      intervals, jm));
}
Example 10
Source File: DataNodeMetrics.java From big-c with Apache License 2.0 | 5 votes |
public static DataNodeMetrics create(Configuration conf, String dnName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);
  String name = "DataNodeActivity-" + (dnName.isEmpty()
      ? "UndefinedDataNodeName" + DFSUtil.getRandom().nextInt()
      : dnName.replace(':', '-'));

  // Percentile measurement is off by default, by watching no intervals
  int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);

  return ms.register(name, null, new DataNodeMetrics(name, sessionId,
      intervals, jm));
}
Example 11
Source File: NodeManagerMetrics.java From hadoop with Apache License 2.0 | 4 votes |
static NodeManagerMetrics create(MetricsSystem ms) {
  JvmMetrics.create("NodeManager", null, ms);
  return ms.register(new NodeManagerMetrics());
}
Example 12
Source File: SecondaryNameNode.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(final Configuration conf,
    CommandLineOpts commandLineOpts) throws IOException {
  final InetSocketAddress infoSocAddr = getHttpAddress(conf);
  final String infoBindAddress = infoSocAddr.getHostName();
  UserGroupInformation.setConfiguration(conf);
  if (UserGroupInformation.isSecurityEnabled()) {
    SecurityUtil.login(conf,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
        infoBindAddress);
  }
  // initiate Java VM metrics
  DefaultMetricsSystem.initialize("SecondaryNameNode");
  JvmMetrics.create("SecondaryNameNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getServiceAddress(conf, true);

  this.conf = conf;
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();

  // initialize checkpoint directories
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointImage = new CheckpointStorage(conf, checkpointDirs,
      checkpointEditsDirs);
  checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
  checkpointImage.deleteTempEdits();

  namesystem = new FSNamesystem(conf, checkpointImage, true);

  // Disable quota checks
  namesystem.dir.disableQuotaChecks();

  // Initialize other scheduling parameters from the configuration
  checkpointConf = new CheckpointConf(conf);

  final InetSocketAddress httpAddr = infoSocAddr;

  final String httpsAddrString = conf.getTrimmed(
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "secondary",
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);

  nameNodeStatusBeanName = MBeans.register("SecondaryNameNode",
      "SecondaryNameNodeInfo", this);

  infoServer = builder.build();

  infoServer.setAttribute("secondary.name.node", this);
  infoServer.setAttribute("name.system.image", checkpointImage);
  infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  infoServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
      ImageServlet.class, true);
  infoServer.start();

  LOG.info("Web server init done");

  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        NetUtils.getHostPortString(httpAddress));
  }

  if (policy.isHttpsEnabled()) {
    InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
        NetUtils.getHostPortString(httpsAddress));
  }

  legacyOivImageDir = conf.get(
      DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);

  LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs "
      + "(" + checkpointConf.getPeriod() / 60 + " min)");
  LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}
Example 13
Source File: NodeManagerMetrics.java From big-c with Apache License 2.0 | 4 votes |
static NodeManagerMetrics create(MetricsSystem ms) {
  JvmMetrics.create("NodeManager", null, ms);
  return ms.register(new NodeManagerMetrics());
}
Example 14
Source File: SecondaryNameNode.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(final Configuration conf,
    CommandLineOpts commandLineOpts) throws IOException {
  final InetSocketAddress infoSocAddr = getHttpAddress(conf);
  final String infoBindAddress = infoSocAddr.getHostName();
  UserGroupInformation.setConfiguration(conf);
  if (UserGroupInformation.isSecurityEnabled()) {
    SecurityUtil.login(conf,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
        infoBindAddress);
  }
  // initiate Java VM metrics
  DefaultMetricsSystem.initialize("SecondaryNameNode");
  JvmMetrics.create("SecondaryNameNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getServiceAddress(conf, true);

  this.conf = conf;
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();

  // initialize checkpoint directories
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointImage = new CheckpointStorage(conf, checkpointDirs,
      checkpointEditsDirs);
  checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
  checkpointImage.deleteTempEdits();

  namesystem = new FSNamesystem(conf, checkpointImage, true);

  // Disable quota checks
  namesystem.dir.disableQuotaChecks();

  // Initialize other scheduling parameters from the configuration
  checkpointConf = new CheckpointConf(conf);

  final InetSocketAddress httpAddr = infoSocAddr;

  final String httpsAddrString = conf.getTrimmed(
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "secondary",
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);

  nameNodeStatusBeanName = MBeans.register("SecondaryNameNode",
      "SecondaryNameNodeInfo", this);

  infoServer = builder.build();

  infoServer.setAttribute("secondary.name.node", this);
  infoServer.setAttribute("name.system.image", checkpointImage);
  infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  infoServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
      ImageServlet.class, true);
  infoServer.start();

  LOG.info("Web server init done");

  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        NetUtils.getHostPortString(httpAddress));
  }

  if (policy.isHttpsEnabled()) {
    InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
        NetUtils.getHostPortString(httpsAddress));
  }

  legacyOivImageDir = conf.get(
      DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);

  LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs "
      + "(" + checkpointConf.getPeriod() / 60 + " min)");
  LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}