org.apache.hadoop.util.ExitUtil Java Examples
The following examples show how to use
org.apache.hadoop.util.ExitUtil.
You can go to the original project or source file by following the links above each example.
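ExitUtil is Hadoop's wrapper around System.exit and Runtime.halt: production code calls ExitUtil.terminate(...) so that tests can intercept termination instead of killing the JVM. As a quick orientation before the examples, here is a minimal sketch of both sides of that pattern; the class name ExitUtilSketch and the simulated error are illustrative only and do not come from any of the projects below.

import org.apache.hadoop.util.ExitUtil;

public class ExitUtilSketch {

  // Production-style code path: exit through ExitUtil rather than System.exit,
  // so callers and tests can intercept the termination.
  static void shutDownOnFatalError(Throwable t) {
    // When system exit is disabled, terminate() throws ExitUtil.ExitException
    // instead of exiting; otherwise it exits the JVM with the given status.
    ExitUtil.terminate(1, t);
  }

  // Test-style usage: disable the real exit, trigger the failing path, and
  // assert on the captured status instead of letting the JVM die.
  public static void main(String[] args) {
    ExitUtil.disableSystemExit();
    try {
      shutDownOnFatalError(new IllegalStateException("simulated fatal error"));
    } catch (ExitUtil.ExitException e) {
      // The ExitException carries the status that was passed to terminate().
      System.out.println("Intercepted exit with status " + e.status);
      // Clear the recorded exit so later checks start from a clean state.
      ExitUtil.resetFirstExitException();
    }
  }
}

The examples that follow show the same two roles in real projects: servers and application masters call ExitUtil.terminate on fatal errors, while tests call disableSystemExit (or disableSystemHalt) and inspect the resulting ExitUtil.ExitException.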
Example #1
Source File: JstormMaster.java From jstorm with Apache License 2.0 | 6 votes |
/**
 * @param args Command line args
 */
public static void main(String[] args) {
  boolean result = false;
  try {
    JstormMaster appMaster = new JstormMaster();
    LOG.info("Initializing Jstorm Master!");
    boolean doRun = appMaster.init(args);
    if (!doRun) {
      System.exit(JOYConstants.EXIT_SUCCESS);
    }
    appMaster.run();
    // LRS won't finish at all
    result = appMaster.finish();
  } catch (Throwable t) {
    LOG.fatal("Error running JstormMaster", t);
    LogManager.shutdown();
    ExitUtil.terminate(JOYConstants.EXIT_FAIL1, t);
  }
  if (result) {
    LOG.info("Application Master completed successfully. exiting");
    System.exit(JOYConstants.EXIT_SUCCESS);
  } else {
    LOG.info("Application Master failed. exiting");
    System.exit(JOYConstants.EXIT_FAIL2);
  }
}
Example #2
Source File: JobHistoryServer.java From XLearning with Apache License 2.0 | 6 votes |
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer jobHistoryServer = null;
  try {
    jobHistoryServer = new JobHistoryServer();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(jobHistoryServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    new GenericOptionsParser(conf, args);
    jobHistoryServer.init(conf);
    jobHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return jobHistoryServer;
}
Example #3
Source File: TestYarnUncaughtExceptionHandler.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * <p>
 * Throw {@code Error} inside thread and
 * check {@code YarnUncaughtExceptionHandler} instance
 * <p>
 * Used {@code ExitUtil} class to avoid jvm exit through
 * {@code System.exit(-1) }
 *
 * @throws InterruptedException
 */
@Test
public void testUncaughtExceptionHandlerWithError() throws InterruptedException {
  ExitUtil.disableSystemExit();
  final YarnUncaughtExceptionHandler spyErrorHandler = spy(exHandler);
  final java.lang.Error error = new java.lang.Error("test-error");
  final Thread errorThread = new Thread(new Runnable() {
    @Override
    public void run() {
      throw error;
    }
  });
  errorThread.setUncaughtExceptionHandler(spyErrorHandler);
  assertSame(spyErrorHandler, errorThread.getUncaughtExceptionHandler());
  errorThread.start();
  errorThread.join();
  verify(spyErrorHandler).uncaughtException(errorThread, error);
}
Example #4
Source File: TezTaskRunner.java From incubator-tez with Apache License 2.0 | 6 votes |
@Override
public synchronized void reportError(Throwable t) {
  if (t instanceof Error) {
    LOG.error("Exception of type Error during heartbeat, Exiting Now");
    ExitUtil.terminate(-1, t);
  } else if (taskRunning.get()) {
    LOG.error("TaskReporter reported error", t);
    maybeRegisterFirstException(t);
    waitingThread.interrupt();
    // A race is possible between a task succeeding, and a subsequent timed heartbeat failing.
    // These errors can be ignored, since a task can only succeed if the synchronous taskSucceeded
    // method does not throw an exception, in which case task success is registered with the AM.
    // Leave this handling to the next getTask / actual task.
  } else {
    LOG.info("Ignoring Communication failure since task with id="
        + task.getTaskAttemptID() + " is already complete");
  }
}
Example #5
Source File: TimelineMetricStoreWatcherTest.java From ambari-metrics with Apache License 2.0 | 6 votes |
@Test
public void testRunPositive() throws Exception {
  HBaseTimelineMetricsService metricStore =
      createNiceMock(HBaseTimelineMetricsService.class);

  expect(metricStore.putMetricsSkipCache(anyObject(TimelineMetrics.class)))
      .andReturn(new TimelinePutResponse());

  // metric found
  expect(metricStore.getTimelineMetrics(EasyMock.<List<String>>anyObject(),
      EasyMock.<List<String>>anyObject(), anyObject(String.class),
      anyObject(String.class), anyObject(Long.class), anyObject(Long.class),
      eq(Precision.SECONDS), eq(1), eq(true), anyObject(TopNConfig.class),
      anyString())).andReturn(null).anyTimes();

  mockStatic(ExitUtil.class);

  replay(metricStore);

  TimelineMetricStoreWatcher timelineMetricStoreWatcher =
      new TimelineMetricStoreWatcher(metricStore,
          TimelineMetricConfiguration.getInstance());
  timelineMetricStoreWatcher.run();
  timelineMetricStoreWatcher.run();
  timelineMetricStoreWatcher.run();

  verify(metricStore);
}
Example #6
Source File: TimelineMetricStoreWatcher.java From ambari-metrics with Apache License 2.0 | 6 votes |
@Override
public void run() {
  if (checkMetricStore()) {
    failures = 0;
    if (LOG.isDebugEnabled()) {
      LOG.debug("Successfully got metrics from TimelineMetricStore");
    }
  } else {
    LOG.info("Failed to get metrics from TimelineMetricStore, attempt = " + failures);
    failures++;
  }

  if (failures >= configuration.getTimelineMetricsServiceWatcherMaxFailures()) {
    String msg = "Error getting metrics from TimelineMetricStore. "
        + "Shutting down by TimelineMetricStoreWatcher.";
    LOG.fatal(msg);
    ExitUtil.terminate(-1, msg);
  }
}
Example #7
Source File: ApplicationHistoryServer.java From hadoop with Apache License 2.0 | 6 votes |
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args, LOG);
  ApplicationHistoryServer appHistoryServer = null;
  try {
    appHistoryServer = new ApplicationHistoryServer();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(appHistoryServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration();
    new GenericOptionsParser(conf, args);
    appHistoryServer.init(conf);
    appHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting ApplicationHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
  }
  return appHistoryServer;
}
Example #8
Source File: TestApplicationHistoryServer.java From hadoop with Apache License 2.0 | 6 votes |
@Test(timeout = 60000)
public void testLaunch() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // Not able to modify the config of this test case,
    // but others have been customized to avoid conflicts
    historyServer = ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
Example #9
Source File: TestApplicationHistoryServer.java From hadoop with Apache License 2.0 | 6 votes |
@Test(timeout = 60000)
public void testLaunchWithArguments() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // Not able to modify the config of this test case,
    // but others have been customized to avoid conflicts
    String[] args = new String[2];
    args[0] = "-D" + YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS + "=4000";
    args[1] = "-D" + YarnConfiguration.TIMELINE_SERVICE_TTL_MS + "=200";
    historyServer = ApplicationHistoryServer.launchAppHistoryServer(args);
    Configuration conf = historyServer.getConfig();
    assertEquals("4000", conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
    assertEquals("200", conf.get(YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
Example #10
Source File: ApplicationMaster.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * @param args Command line args
 */
public static void main(String[] args) {
  boolean result = false;
  try {
    ApplicationMaster appMaster = new ApplicationMaster();
    LOG.info("Initializing ApplicationMaster");
    boolean doRun = appMaster.init(args);
    if (!doRun) {
      System.exit(0);
    }
    appMaster.run();
    result = appMaster.finish();
  } catch (Throwable t) {
    LOG.fatal("Error running ApplicationMaster", t);
    LogManager.shutdown();
    ExitUtil.terminate(1, t);
  }
  if (result) {
    LOG.info("Application Master completed successfully. exiting");
    System.exit(0);
  } else {
    LOG.info("Application Master failed. exiting");
    System.exit(2);
  }
}
Example #11
Source File: ReplicationManager.java From hadoop-ozone with Apache License 2.0 | 6 votes |
/**
 * ReplicationMonitor thread runnable. This wakes up at configured
 * interval and processes all the containers in the system.
 */
private synchronized void run() {
  try {
    while (running) {
      final long start = Time.monotonicNow();
      final Set<ContainerID> containerIds = containerManager.getContainerIDs();
      containerIds.forEach(this::processContainer);
      LOG.info("Replication Monitor Thread took {} milliseconds for"
          + " processing {} containers.", Time.monotonicNow() - start,
          containerIds.size());
      wait(conf.getInterval());
    }
  } catch (Throwable t) {
    // When we get runtime exception, we should terminate SCM.
    LOG.error("Exception in Replication Monitor Thread.", t);
    ExitUtil.terminate(1, t);
  }
}
Example #12
Source File: TestClusterId.java From big-c with Apache License 2.0 | 6 votes |
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();

  String baseDir = PathUtils.getTestDirName(getClass());

  hdfsDir = new File(baseDir, "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());

  // as some tests might change these values we reset them to defaults before
  // every test
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);

  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
Example #13
Source File: TestClusterId.java From hadoop with Apache License 2.0 | 6 votes |
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();

  String baseDir = PathUtils.getTestDirName(getClass());

  hdfsDir = new File(baseDir, "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());

  // as some tests might change these values we reset them to defaults before
  // every test
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);

  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
Example #14
Source File: LoadExecutors.java From hadoop-ozone with Apache License 2.0 | 6 votes |
private void load(long runTimeMillis) {
  long threadID = Thread.currentThread().getId();
  LOG.info("LOADGEN: Started IO Thread:{}.", threadID);
  long startTime = Time.monotonicNow();

  while (Time.monotonicNow() - startTime < runTimeMillis) {
    LoadGenerator gen = generators.get(RandomUtils.nextInt(0, numGenerators));

    try {
      gen.generateLoad();
    } catch (Throwable t) {
      LOG.error("{} LOADGEN: Exiting due to exception", gen, t);
      ExitUtil.terminate(new ExitUtil.ExitException(1, t));
      break;
    }
  }
}
Example #15
Source File: JobHistoryServer.java From big-c with Apache License 2.0 | 6 votes |
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer jobHistoryServer = null;
  try {
    jobHistoryServer = new JobHistoryServer();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(jobHistoryServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    new GenericOptionsParser(conf, args);
    jobHistoryServer.init(conf);
    jobHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return jobHistoryServer;
}
Example #16
Source File: ApplicationMaster.java From big-c with Apache License 2.0 | 6 votes |
/**
 * @param args Command line args
 */
public static void main(String[] args) {
  boolean result = false;
  try {
    ApplicationMaster appMaster = new ApplicationMaster();
    LOG.info("Initializing ApplicationMaster");
    boolean doRun = appMaster.init(args);
    if (!doRun) {
      System.exit(0);
    }
    appMaster.run();
    result = appMaster.finish();
  } catch (Throwable t) {
    LOG.fatal("Error running ApplicationMaster", t);
    LogManager.shutdown();
    ExitUtil.terminate(1, t);
  }
  if (result) {
    LOG.info("Application Master completed successfully. exiting");
    System.exit(0);
  } else {
    LOG.info("Application Master failed. exiting");
    System.exit(2);
  }
}
Example #17
Source File: TestApplicationHistoryServer.java From big-c with Apache License 2.0 | 6 votes |
@Test(timeout = 60000)
public void testLaunchWithArguments() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // Not able to modify the config of this test case,
    // but others have been customized to avoid conflicts
    String[] args = new String[2];
    args[0] = "-D" + YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS + "=4000";
    args[1] = "-D" + YarnConfiguration.TIMELINE_SERVICE_TTL_MS + "=200";
    historyServer = ApplicationHistoryServer.launchAppHistoryServer(args);
    Configuration conf = historyServer.getConfig();
    assertEquals("4000", conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
    assertEquals("200", conf.get(YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
Example #18
Source File: TestApplicationHistoryServer.java From big-c with Apache License 2.0 | 6 votes |
@Test(timeout = 60000)
public void testLaunch() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // Not able to modify the config of this test case,
    // but others have been customized to avoid conflicts
    historyServer = ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
Example #19
Source File: ApplicationHistoryServer.java From big-c with Apache License 2.0 | 6 votes |
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args, LOG);
  ApplicationHistoryServer appHistoryServer = null;
  try {
    appHistoryServer = new ApplicationHistoryServer();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(appHistoryServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration();
    new GenericOptionsParser(conf, args);
    appHistoryServer.init(conf);
    appHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting ApplicationHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
  }
  return appHistoryServer;
}
Example #20
Source File: NodeManager.java From big-c with Apache License 2.0 | 6 votes |
protected void shutDown() {
  new Thread() {
    @Override
    public void run() {
      try {
        NodeManager.this.stop();
      } catch (Throwable t) {
        LOG.error("Error while shutting down NodeManager", t);
      } finally {
        if (shouldExitOnShutdownEvent
            && !ShutdownHookManager.get().isShutdownInProgress()) {
          ExitUtil.terminate(-1);
        }
      }
    }
  }.start();
}
Example #21
Source File: TestYarnUncaughtExceptionHandler.java From big-c with Apache License 2.0 | 6 votes |
/**
 * <p>
 * Throw {@code OutOfMemoryError} inside thread and
 * check {@code YarnUncaughtExceptionHandler} instance
 * <p>
 * Used {@code ExitUtil} class to avoid jvm exit through
 * {@code Runtime.getRuntime().halt(-1)}
 *
 * @throws InterruptedException
 */
@Test
public void testUncaughtExceptionHandlerWithOutOfMemoryError() throws InterruptedException {
  ExitUtil.disableSystemHalt();
  final YarnUncaughtExceptionHandler spyOomHandler = spy(exHandler);
  final OutOfMemoryError oomError = new OutOfMemoryError("out-of-memory-error");
  final Thread oomThread = new Thread(new Runnable() {
    @Override
    public void run() {
      throw oomError;
    }
  });
  oomThread.setUncaughtExceptionHandler(spyOomHandler);
  assertSame(spyOomHandler, oomThread.getUncaughtExceptionHandler());
  oomThread.start();
  oomThread.join();
  verify(spyOomHandler).uncaughtException(oomThread, oomError);
}
Example #22
Source File: TestYarnUncaughtExceptionHandler.java From big-c with Apache License 2.0 | 6 votes |
/**
 * <p>
 * Throw {@code Error} inside thread and
 * check {@code YarnUncaughtExceptionHandler} instance
 * <p>
 * Used {@code ExitUtil} class to avoid jvm exit through
 * {@code System.exit(-1) }
 *
 * @throws InterruptedException
 */
@Test
public void testUncaughtExceptionHandlerWithError() throws InterruptedException {
  ExitUtil.disableSystemExit();
  final YarnUncaughtExceptionHandler spyErrorHandler = spy(exHandler);
  final java.lang.Error error = new java.lang.Error("test-error");
  final Thread errorThread = new Thread(new Runnable() {
    @Override
    public void run() {
      throw error;
    }
  });
  errorThread.setUncaughtExceptionHandler(spyErrorHandler);
  assertSame(spyErrorHandler, errorThread.getUncaughtExceptionHandler());
  errorThread.start();
  errorThread.join();
  verify(spyErrorHandler).uncaughtException(errorThread, error);
}
Example #23
Source File: JobHistoryServer.java From hadoop with Apache License 2.0 | 6 votes |
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer jobHistoryServer = null;
  try {
    jobHistoryServer = new JobHistoryServer();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(jobHistoryServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    new GenericOptionsParser(conf, args);
    jobHistoryServer.init(conf);
    jobHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return jobHistoryServer;
}
Example #24
Source File: RMDelegationTokenSecretManager.java From big-c with Apache License 2.0 | 5 votes |
@Override
protected void storeNewToken(RMDelegationTokenIdentifier identifier, long renewDate) {
  try {
    LOG.info("storing RMDelegation token with sequence number: "
        + identifier.getSequenceNumber());
    rmContext.getStateStore().storeRMDelegationToken(identifier, renewDate);
  } catch (Exception e) {
    LOG.error("Error in storing RMDelegationToken with sequence number: "
        + identifier.getSequenceNumber());
    ExitUtil.terminate(1, e);
  }
}
Example #25
Source File: RMDelegationTokenSecretManager.java From big-c with Apache License 2.0 | 5 votes |
@Override
protected void removeStoredToken(RMDelegationTokenIdentifier ident) throws IOException {
  try {
    LOG.info("removing RMDelegation token with sequence number: "
        + ident.getSequenceNumber());
    rmContext.getStateStore().removeRMDelegationToken(ident);
  } catch (Exception e) {
    LOG.error("Error in removing RMDelegationToken with sequence number: "
        + ident.getSequenceNumber());
    ExitUtil.terminate(1, e);
  }
}
Example #26
Source File: RMDelegationTokenSecretManager.java From big-c with Apache License 2.0 | 5 votes |
@Override
protected void removeStoredMasterKey(DelegationKey key) {
  try {
    LOG.info("removing master key with keyID " + key.getKeyId());
    rmContext.getStateStore().removeRMDTMasterKey(key);
  } catch (Exception e) {
    LOG.error("Error in removing master key with KeyID: " + key.getKeyId());
    ExitUtil.terminate(1, e);
  }
}
Example #27
Source File: RMDelegationTokenSecretManager.java From big-c with Apache License 2.0 | 5 votes |
@Override
protected void storeNewMasterKey(DelegationKey newKey) {
  try {
    LOG.info("storing master key with keyID " + newKey.getKeyId());
    rmContext.getStateStore().storeRMDTMasterKey(newKey);
  } catch (Exception e) {
    LOG.error("Error in storing master key with KeyID: " + newKey.getKeyId());
    ExitUtil.terminate(1, e);
  }
}
Example #28
Source File: TestGridmixSubmission.java From hadoop with Apache License 2.0 | 5 votes |
@Test (timeout=100000)
public void testMain() throws Exception {

  SecurityManager securityManager = System.getSecurityManager();

  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream out = new PrintStream(bytes);
  final PrintStream oldOut = System.out;
  System.setErr(out);
  ExitUtil.disableSystemExit();
  try {
    String[] argv = new String[0];
    DebugGridmix.main(argv);
  } catch (ExitUtil.ExitException e) {
    assertEquals("ExitException", e.getMessage());
    ExitUtil.resetFirstExitException();
  } finally {
    System.setErr(oldOut);
    System.setSecurityManager(securityManager);
  }
  String print = bytes.toString();
  // should be printed tip in std error stream
  assertTrue(print
      .contains("Usage: gridmix [-generate <MiB>] [-users URI] [-Dname=value ...] <iopath> <trace>"));
  assertTrue(print.contains("e.g. gridmix -generate 100m foo -"));
}
Example #29
Source File: DelegationTokenFetcher.java From big-c with Apache License 2.0 | 5 votes |
private static void printUsage(PrintStream err) {
  err.println("fetchdt retrieves delegation tokens from the NameNode");
  err.println();
  err.println("fetchdt <opts> <token file>");
  err.println("Options:");
  err.println(" --webservice <url> Url to contact NN on");
  err.println(" --renewer <name> Name of the delegation token renewer");
  err.println(" --cancel Cancel the delegation token");
  err.println(" --renew Renew the delegation token. Delegation "
      + "token must have been fetched using the --renewer <name> option.");
  err.println(" --print Print the delegation token");
  err.println();
  GenericOptionsParser.printGenericCommandUsage(err);
  ExitUtil.terminate(1);
}
Example #30
Source File: Gridmix.java From hadoop with Apache License 2.0 | 5 votes |
public static void main(String[] argv) throws Exception {
  int res = -1;
  try {
    res = ToolRunner.run(new Configuration(), new Gridmix(argv), argv);
  } finally {
    ExitUtil.terminate(res);
  }
}