Java Code Examples for org.apache.hadoop.metrics2.lib.DefaultMetricsSystem#initialize()
The following examples show how to use org.apache.hadoop.metrics2.lib.DefaultMetricsSystem#initialize(). Each example is taken from an open-source project; the source file, the project it comes from, and its license are noted above each snippet.
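Before the project examples, here is a minimal, self-contained sketch of the typical lifecycle: initialize the process-wide metrics system once, register a source against it, and shut it down when the process is done. The ExampleMetrics class, the "Example" prefix, and the counter name are hypothetical and exist only for illustration; the DefaultMetricsSystem and MetricsSystem calls are the actual Hadoop metrics2 API.

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

// Hypothetical annotated metrics source, for illustration only.
@Metrics(about = "Example metrics", context = "example")
class ExampleMetrics {
  // Instantiated by the metrics system when the source is registered.
  @Metric("Number of requests served") MutableCounterLong requests;
}

public class ExampleMetricsApp {
  public static void main(String[] args) {
    // Initialize (and start) the process-wide metrics system; the prefix
    // scopes configuration lookups and JMX object names.
    MetricsSystem ms = DefaultMetricsSystem.initialize("Example");
    // Register the annotated source with the initialized system.
    ExampleMetrics metrics = ms.register("ExampleMetrics",
        "Example metrics source", new ExampleMetrics());
    metrics.requests.incr();
    // Tear the metrics system down when the process no longer needs it.
    DefaultMetricsSystem.shutdown();
  }
}

The project examples below show the same initialize() call in real daemons, test fixtures, and metrics facades.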
Example 1
Source File: JournalNode.java From hadoop with Apache License 2.0
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");

  validateAndCreateJournalDir(localDir);

  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY,
      socAddr.getHostName());

  registerJNMXBean();

  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
Example 2
Source File: JournalNode.java From big-c with Apache License 2.0
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");

  validateAndCreateJournalDir(localDir);

  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY,
      socAddr.getHostName());

  registerJNMXBean();

  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
Example 3
Source File: BaseSourceImpl.java From hbase with Apache License 2.0
synchronized void init(String name) {
  if (inited) {
    return;
  }
  inited = true;
  DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
  JvmMetrics.initSingleton(name, "");
  // initialize hbase-metrics module based metric system as well. GlobalMetricRegistriesSource
  // initialization depends on the metric system being already initialized, that is why we are
  // doing it here. Once BaseSourceSourceImpl is removed, we should do the initialization of
  // these elsewhere.
  GlobalMetricRegistriesAdapter.init();
}
Example 4
Source File: RpcProgramNfs3.java From big-c with Apache License 2.0
public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  DefaultMetricsSystem.initialize("Nfs3");
  String displayName = DNS.getDefaultHost("default", "default")
      + config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
          NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
  metrics = Nfs3Metrics.create(config, displayName);
  return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts);
}
Example 5
Source File: QueryMetricsFacade.java From kylin-on-parquet-v2 with Apache License 2.0
public static void init() {
  enabled = KylinConfig.getInstanceFromEnv().getQueryMetricsEnabled();
  if (!enabled)
    return;

  DefaultMetricsSystem.initialize("Kylin");
}
Example 6
Source File: JobHistoryServer.java From XLearning with Apache License 2.0
@Override
protected void serviceStart() throws Exception {
  DefaultMetricsSystem.initialize("JobHistoryServer");
  JvmMetrics.initSingleton("JobHistoryServer", null);
  super.serviceStart();
  deleteLogManager = new Thread(new deleteLogMonitor());
  deleteLogManager.setName("Log-delete-monitor");
  deleteLogManager.setDaemon(true);
  deleteLogManager.start();
}
Example 7
Source File: RpcProgramNfs3.java From hadoop with Apache License 2.0
public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  DefaultMetricsSystem.initialize("Nfs3");
  String displayName = DNS.getDefaultHost("default", "default")
      + config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
          NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
  metrics = Nfs3Metrics.create(config, displayName);
  return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts);
}
Example 8
Source File: QueryMetricsFacade.java From kylin with Apache License 2.0
public static void init() {
  enabled = KylinConfig.getInstanceFromEnv().getQueryMetricsEnabled();
  if (!enabled)
    return;

  DefaultMetricsSystem.initialize("Kylin");
}
Example 9
Source File: TestClusterMetrics.java From hadoop with Apache License 2.0
@Before
public void setup() {
  DefaultMetricsSystem.initialize("ResourceManager");
  metrics = ClusterMetrics.getMetrics();
}
Example 10
Source File: NodeManager.java From big-c with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);

  rmWorkPreservingRestartEnabled = conf.getBoolean(YarnConfiguration
      .RM_WORK_PRESERVING_RECOVERY_ENABLED,
      YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_ENABLED);

  initAndStartRecoveryStore(conf);

  NMContainerTokenSecretManager containerTokenSecretManager =
      new NMContainerTokenSecretManager(conf, nmStore);

  NMTokenSecretManagerInNM nmTokenSecretManager =
      new NMTokenSecretManagerInNM(nmStore);

  recoverTokens(nmTokenSecretManager, containerTokenSecretManager);

  this.aclsManager = new ApplicationACLsManager(conf);

  ContainerExecutor exec = ReflectionUtils.newInstance(
      conf.getClass(YarnConfiguration.NM_CONTAINER_EXECUTOR,
          DefaultContainerExecutor.class, ContainerExecutor.class), conf);
  try {
    exec.init();
  } catch (IOException e) {
    throw new YarnRuntimeException("Failed to initialize container executor", e);
  }
  DeletionService del = createDeletionService(exec);
  addService(del);

  // NodeManager level dispatcher
  this.dispatcher = new AsyncDispatcher();

  this.coresManager = new CoresManagerImpl();
  this.coresManager.init(conf);

  nodeHealthChecker = new NodeHealthCheckerService();
  addService(nodeHealthChecker);
  dirsHandler = nodeHealthChecker.getDiskHandler();

  this.context = createNMContext(containerTokenSecretManager,
      nmTokenSecretManager, nmStore, coresManager);

  nodeStatusUpdater =
      createNodeStatusUpdater(context, dispatcher, nodeHealthChecker);

  NodeResourceMonitor nodeResourceMonitor = createNodeResourceMonitor();
  addService(nodeResourceMonitor);

  containerManager =
      createContainerManager(context, exec, del, nodeStatusUpdater,
          this.aclsManager, dirsHandler);
  addService(containerManager);
  ((NMContext) context).setContainerManager(containerManager);

  WebServer webServer = createWebServer(context, containerManager
      .getContainersMonitor(), this.aclsManager, dirsHandler);
  addService(webServer);
  ((NMContext) context).setWebServer(webServer);

  dispatcher.register(ContainerManagerEventType.class, containerManager);
  dispatcher.register(NodeManagerEventType.class, this);
  addService(dispatcher);

  DefaultMetricsSystem.initialize("NodeManager");

  // StatusUpdater should be added last so that it get started last
  // so that we make sure everything is up before registering with RM.
  addService(nodeStatusUpdater);

  super.serviceInit(conf);
  // TODO add local dirs to del
}
Example 11
Source File: MRAppMaster.java From big-c with Apache License 2.0
@SuppressWarnings("unchecked") @Override protected void serviceStart() throws Exception { amInfos = new LinkedList<AMInfo>(); completedTasksFromPreviousRun = new HashMap<TaskId, TaskInfo>(); processRecovery(); // Current an AMInfo for the current AM generation. AMInfo amInfo = MRBuilderUtils.newAMInfo(appAttemptID, startTime, containerID, nmHost, nmPort, nmHttpPort); // /////////////////// Create the job itself. job = createJob(getConfig(), forcedState, shutDownMessage); // End of creating the job. // Send out an MR AM inited event for all previous AMs. for (AMInfo info : amInfos) { dispatcher.getEventHandler().handle( new JobHistoryEvent(job.getID(), new AMStartedEvent(info .getAppAttemptId(), info.getStartTime(), info.getContainerId(), info.getNodeManagerHost(), info.getNodeManagerPort(), info .getNodeManagerHttpPort(), appSubmitTime))); } // Send out an MR AM inited event for this AM. dispatcher.getEventHandler().handle( new JobHistoryEvent(job.getID(), new AMStartedEvent(amInfo .getAppAttemptId(), amInfo.getStartTime(), amInfo.getContainerId(), amInfo.getNodeManagerHost(), amInfo.getNodeManagerPort(), amInfo .getNodeManagerHttpPort(), this.forcedState == null ? null : this.forcedState.toString(), appSubmitTime))); amInfos.add(amInfo); // metrics system init is really init & start. // It's more test friendly to put it here. DefaultMetricsSystem.initialize("MRAppMaster"); boolean initFailed = false; if (!errorHappenedShutDown) { // create a job event for job intialization JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT); // Send init to the job (this does NOT trigger job execution) // This is a synchronous call, not an event through dispatcher. We want // job-init to be done completely here. jobEventDispatcher.handle(initJobEvent); // If job is still not initialized, an error happened during // initialization. Must complete starting all of the services so failure // events can be processed. initFailed = (((JobImpl)job).getInternalState() != JobStateInternal.INITED); // JobImpl's InitTransition is done (call above is synchronous), so the // "uber-decision" (MR-1220) has been made. Query job and switch to // ubermode if appropriate (by registering different container-allocator // and container-launcher services/event-handlers). if (job.isUber()) { speculatorEventDispatcher.disableSpeculation(); LOG.info("MRAppMaster uberizing job " + job.getID() + " in local container (\"uber-AM\") on node " + nmHost + ":" + nmPort + "."); } else { // send init to speculator only for non-uber jobs. // This won't yet start as dispatcher isn't started yet. dispatcher.getEventHandler().handle( new SpeculatorEvent(job.getID(), clock.getTime())); LOG.info("MRAppMaster launching normal, non-uberized, multi-container " + "job " + job.getID() + "."); } // Start ClientService here, since it's not initialized if // errorHappenedShutDown is true clientService.start(); } //start all the components super.serviceStart(); // finally set the job classloader MRApps.setClassLoader(jobClassLoader, getConfig()); if (initFailed) { JobEvent initFailedEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT_FAILED); jobEventDispatcher.handle(initFailedEvent); } else { // All components have started, start the job. startJobs(); } }
Example 12
Source File: TestClusterMetrics.java From big-c with Apache License 2.0
@Before
public void setup() {
  DefaultMetricsSystem.initialize("ResourceManager");
  metrics = ClusterMetrics.getMetrics();
}
Example 13
Source File: XceiverClientMetrics.java From hadoop-ozone with Apache License 2.0
public static XceiverClientMetrics create() {
  DefaultMetricsSystem.initialize(SOURCE_NAME);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  return ms.register(SOURCE_NAME, "Storage Container Client Metrics",
      new XceiverClientMetrics());
}
Example 14
Source File: TestNodeManagerMetrics.java From hadoop with Apache License 2.0
@Test
public void testNames() {
  DefaultMetricsSystem.initialize("NodeManager");
  NodeManagerMetrics metrics = NodeManagerMetrics.create();
  Resource total = Records.newRecord(Resource.class);
  total.setMemory(8*GiB);
  total.setVirtualCores(16);
  total.setGpuCores(16);
  Resource resource = Records.newRecord(Resource.class);
  resource.setMemory(512); //512MiB
  resource.setVirtualCores(2);
  resource.setGpuCores(1);

  metrics.addResource(total);

  for (int i = 10; i-- > 0;) {
    // allocate 10 containers(allocatedGB: 5GiB, availableGB: 3GiB)
    metrics.launchedContainer();
    metrics.allocateContainer(resource);
  }

  metrics.initingContainer();
  metrics.endInitingContainer();
  metrics.runningContainer();
  metrics.endRunningContainer();
  // Releasing 3 containers(allocatedGB: 3.5GiB, availableGB: 4.5GiB)
  metrics.completedContainer();
  metrics.releaseContainer(resource);

  metrics.failedContainer();
  metrics.releaseContainer(resource);

  metrics.killedContainer();
  metrics.releaseContainer(resource);

  metrics.initingContainer();
  metrics.runningContainer();

  Assert.assertTrue(!metrics.containerLaunchDuration.changed());
  metrics.addContainerLaunchDuration(1);
  Assert.assertTrue(metrics.containerLaunchDuration.changed());

  // availableGB is expected to be floored,
  // while allocatedGB is expected to be ceiled.
  // allocatedGB: 3.5GB allocated memory is shown as 4GB
  // availableGB: 4.5GB available memory is shown as 4GB
  checkMetrics(10, 1, 1, 1, 1, 1, 4, 7, 4, 14, 2, 7, 9);
}
Example 15
Source File: NodeManager.java From hadoop with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);

  rmWorkPreservingRestartEnabled = conf.getBoolean(YarnConfiguration
      .RM_WORK_PRESERVING_RECOVERY_ENABLED,
      YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_ENABLED);

  initAndStartRecoveryStore(conf);

  NMContainerTokenSecretManager containerTokenSecretManager =
      new NMContainerTokenSecretManager(conf, nmStore);

  NMTokenSecretManagerInNM nmTokenSecretManager =
      new NMTokenSecretManagerInNM(nmStore);

  recoverTokens(nmTokenSecretManager, containerTokenSecretManager);

  this.aclsManager = new ApplicationACLsManager(conf);

  ContainerExecutor exec = ReflectionUtils.newInstance(
      conf.getClass(YarnConfiguration.NM_CONTAINER_EXECUTOR,
          DefaultContainerExecutor.class, ContainerExecutor.class), conf);
  try {
    exec.init();
  } catch (IOException e) {
    throw new YarnRuntimeException("Failed to initialize container executor", e);
  }
  DeletionService del = createDeletionService(exec);
  addService(del);

  // NodeManager level dispatcher
  this.dispatcher = new AsyncDispatcher();

  dirsHandler = new LocalDirsHandlerService(metrics);
  nodeHealthChecker =
      new NodeHealthCheckerService(
          getNodeHealthScriptRunner(conf), dirsHandler);
  addService(nodeHealthChecker);

  this.context = createNMContext(containerTokenSecretManager,
      nmTokenSecretManager, nmStore);

  nodeStatusUpdater =
      createNodeStatusUpdater(context, dispatcher, nodeHealthChecker);

  NodeResourceMonitor nodeResourceMonitor = createNodeResourceMonitor();
  addService(nodeResourceMonitor);

  containerManager =
      createContainerManager(context, exec, del, nodeStatusUpdater,
          this.aclsManager, dirsHandler);
  addService(containerManager);
  ((NMContext) context).setContainerManager(containerManager);

  WebServer webServer = createWebServer(context, containerManager
      .getContainersMonitor(), this.aclsManager, dirsHandler);
  addService(webServer);
  ((NMContext) context).setWebServer(webServer);

  dispatcher.register(ContainerManagerEventType.class, containerManager);
  dispatcher.register(NodeManagerEventType.class, this);
  addService(dispatcher);

  DefaultMetricsSystem.initialize("NodeManager");

  // StatusUpdater should be added last so that it get started last
  // so that we make sure everything is up before registering with RM.
  addService(nodeStatusUpdater);

  super.serviceInit(conf);
  // TODO add local dirs to del
}
Example 16
Source File: TestNodeManagerMetrics.java From big-c with Apache License 2.0
@Test
public void testNames() {
  DefaultMetricsSystem.initialize("NodeManager");
  NodeManagerMetrics metrics = NodeManagerMetrics.create();
  Resource total = Records.newRecord(Resource.class);
  total.setMemory(8*GiB);
  total.setVirtualCores(16);
  Resource resource = Records.newRecord(Resource.class);
  resource.setMemory(512); //512MiB
  resource.setVirtualCores(2);

  metrics.addResource(total);

  for (int i = 10; i-- > 0;) {
    // allocate 10 containers(allocatedGB: 5GiB, availableGB: 3GiB)
    metrics.launchedContainer();
    metrics.allocateContainer(resource);
  }

  metrics.initingContainer();
  metrics.endInitingContainer();
  metrics.runningContainer();
  metrics.endRunningContainer();
  // Releasing 3 containers(allocatedGB: 3.5GiB, availableGB: 4.5GiB)
  metrics.completedContainer();
  metrics.releaseContainer(resource);

  metrics.failedContainer();
  metrics.releaseContainer(resource);

  metrics.killedContainer();
  metrics.releaseContainer(resource);

  metrics.initingContainer();
  metrics.runningContainer();

  Assert.assertTrue(!metrics.containerLaunchDuration.changed());
  metrics.addContainerLaunchDuration(1);
  Assert.assertTrue(metrics.containerLaunchDuration.changed());

  // availableGB is expected to be floored,
  // while allocatedGB is expected to be ceiled.
  // allocatedGB: 3.5GB allocated memory is shown as 4GB
  // availableGB: 4.5GB available memory is shown as 4GB
  checkMetrics(10, 1, 1, 1, 1, 1, 4, 7, 4, 14, 2);
}
Example 17
Source File: GlobalMetricRegistriesAdapter.java From phoenix with Apache License 2.0
private GlobalMetricRegistriesAdapter() {
  DefaultMetricsSystem.initialize("Phoenix");
  JvmMetrics.initSingleton("Phoenix", "");
}
Example 18
Source File: TestEventWatcher.java From hadoop-ozone with Apache License 2.0
@Test
public void testMetrics() throws InterruptedException {
  DefaultMetricsSystem.initialize("test");

  EventQueue queue = new EventQueue();

  EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
      replicationWatcher = createEventWatcher();

  EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents =
      new EventHandlerStub<>();

  queue.addHandler(UNDER_REPLICATED, underReplicatedEvents);

  replicationWatcher.start(queue);

  //send 3 event to track 3 in-progress activity
  UnderreplicatedEvent event1 =
      new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");

  UnderreplicatedEvent event2 =
      new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2");

  UnderreplicatedEvent event3 =
      new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1");

  queue.fireEvent(WATCH_UNDER_REPLICATED, event1);
  queue.fireEvent(WATCH_UNDER_REPLICATED, event2);
  queue.fireEvent(WATCH_UNDER_REPLICATED, event3);

  //1st event is completed, don't need to track any more
  ReplicationCompletedEvent event1Completed =
      new ReplicationCompletedEvent(event1.id, "C1", "D1");

  queue.fireEvent(REPLICATION_COMPLETED, event1Completed);

  //lease manager timeout = 2000L
  Thread.sleep(3 * 2000L);

  queue.processAll(2000L);

  //until now: 3 in-progress activities are tracked with three
  // UnderreplicatedEvents. The first one is completed, the remaining two
  // are timed out (as the timeout -- defined in the lease manager -- is
  // 2000ms).
  EventWatcherMetrics metrics = replicationWatcher.getMetrics();

  //3 events are received
  Assert.assertEquals(3, metrics.getTrackedEvents().value());

  //completed + timed out = all messages
  Assert.assertEquals(
      "number of timed out and completed messages should be the same as the"
          + " all messages",
      metrics.getTrackedEvents().value(),
      metrics.getCompletedEvents().value() + metrics.getTimedOutEvents()
          .value());

  //_at least_ two are timed out.
  Assert.assertTrue("At least two events should be timed out.",
      metrics.getTimedOutEvents().value() >= 2);

  DefaultMetricsSystem.shutdown();
}
Example 19
Source File: TestEventQueue.java From hadoop-ozone with Apache License 2.0
@Before
public void startEventQueue() {
  DefaultMetricsSystem.initialize(getClass().getSimpleName());
  queue = new EventQueue();
}
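The matching teardown is not shown in this snippet. Because DefaultMetricsSystem is a process-wide singleton, a test that calls initialize() in setup typically shuts it back down afterwards (as Example 18 does at the end of its test body). The following @After method is a hypothetical sketch, not taken from the project:

// Hypothetical JUnit teardown: shut the singleton metrics system down so
// the next test can call DefaultMetricsSystem.initialize() cleanly.
@After
public void stopEventQueue() {
  DefaultMetricsSystem.shutdown();
}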
Example 20
Source File: SecondaryNameNode.java From big-c with Apache License 2.0
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(final Configuration conf,
    CommandLineOpts commandLineOpts) throws IOException {
  final InetSocketAddress infoSocAddr = getHttpAddress(conf);
  final String infoBindAddress = infoSocAddr.getHostName();
  UserGroupInformation.setConfiguration(conf);
  if (UserGroupInformation.isSecurityEnabled()) {
    SecurityUtil.login(conf,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
        infoBindAddress);
  }
  // initiate Java VM metrics
  DefaultMetricsSystem.initialize("SecondaryNameNode");
  JvmMetrics.create("SecondaryNameNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getServiceAddress(conf, true);

  this.conf = conf;
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();

  // initialize checkpoint directories
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointImage = new CheckpointStorage(conf, checkpointDirs,
      checkpointEditsDirs);
  checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
  checkpointImage.deleteTempEdits();

  namesystem = new FSNamesystem(conf, checkpointImage, true);

  // Disable quota checks
  namesystem.dir.disableQuotaChecks();

  // Initialize other scheduling parameters from the configuration
  checkpointConf = new CheckpointConf(conf);

  final InetSocketAddress httpAddr = infoSocAddr;

  final String httpsAddrString = conf.getTrimmed(
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "secondary",
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);

  nameNodeStatusBeanName = MBeans.register("SecondaryNameNode",
      "SecondaryNameNodeInfo", this);

  infoServer = builder.build();

  infoServer.setAttribute("secondary.name.node", this);
  infoServer.setAttribute("name.system.image", checkpointImage);
  infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  infoServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
      ImageServlet.class, true);
  infoServer.start();

  LOG.info("Web server init done");

  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        NetUtils.getHostPortString(httpAddress));
  }

  if (policy.isHttpsEnabled()) {
    InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
        NetUtils.getHostPortString(httpsAddress));
  }

  legacyOivImageDir = conf.get(
      DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);

  LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs "
      + "(" + checkpointConf.getPeriod() / 60 + " min)");
  LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}