Java Code Examples for org.apache.log4j.Logger#getRootLogger()
The following examples show how to use org.apache.log4j.Logger#getRootLogger().
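Before the project examples, here is a minimal, self-contained sketch of the typical pattern behind this API: obtain the root logger, attach an appender, and set the level that all child loggers inherit. It is not taken from any of the projects below; the class name, layout pattern, and log messages are illustrative assumptions.

import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class RootLoggerSketch {
    public static void main(String[] args) {
        // The root logger sits at the top of the Log4j 1.x logger hierarchy;
        // appenders and levels set here are inherited by all other loggers.
        Logger rootLogger = Logger.getRootLogger();

        // Attach a console appender with an illustrative pattern layout
        // (several examples below perform a similar addAppender call).
        rootLogger.addAppender(
                new ConsoleAppender(new PatternLayout("%d{ISO8601} %-5p [%c{1}] %m%n")));

        // Adjust the global threshold; child loggers inherit it unless overridden.
        rootLogger.setLevel(Level.INFO);

        rootLogger.info("Root logger configured.");
        rootLogger.debug("This DEBUG message is filtered out by the INFO level.");
    }
}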
Example 1
Source File: TaskLog.java From hadoop with Apache License 2.0
@SuppressWarnings("unchecked") public static synchronized void syncLogs() { // flush standard streams // System.out.flush(); System.err.flush(); // flush flushable appenders // final Logger rootLogger = Logger.getRootLogger(); flushAppenders(rootLogger); final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository(). getCurrentLoggers(); while (allLoggers.hasMoreElements()) { final Logger l = allLoggers.nextElement(); flushAppenders(l); } }
Example 2
Source File: DocumentSymbolProcessorTest.java From camel-language-server with Apache License 2.0
@Test
void testNoExceptionWithJavaFile() throws Exception {
    final TestLogAppender appender = new TestLogAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    File f = new File("src/test/resources/workspace/camel.java");
    try (FileInputStream fis = new FileInputStream(f)) {
        CamelLanguageServer camelLanguageServer = initializeLanguageServer(fis, ".java");
        CompletableFuture<List<Either<SymbolInformation, DocumentSymbol>>> documentSymbolFor =
            getDocumentSymbolFor(camelLanguageServer);
        List<Either<SymbolInformation, DocumentSymbol>> symbolsInformation = documentSymbolFor.get();
        assertThat(symbolsInformation).isEmpty();
        for (LoggingEvent loggingEvent : appender.getLog()) {
            if (loggingEvent.getMessage() != null) {
                assertThat((String) loggingEvent.getMessage())
                    .doesNotContain(DocumentSymbolProcessor.CANNOT_DETERMINE_DOCUMENT_SYMBOLS);
            }
        }
    }
}
Example 3
Source File: Test_RemoteLoggingConfigurator.java From ats-framework with Apache License 2.0
@Test
public void testNeedsApplyWithAppenderExpectTrue() {
    ActiveDbAppender appender = new ActiveDbAppender();
    appender.setHost("test");
    appender.setDatabase("test");
    appender.setUser("test");
    appender.setPassword("test");

    Logger log = Logger.getRootLogger();
    log.addAppender(appender);

    // construct the configurator - an appender is present
    RemoteLoggingConfigurator remoteLoggingConfig = new RemoteLoggingConfigurator(null, -1);

    // remove the appender, so the configurator will need to apply it
    log.removeAppender(appender);

    assertTrue(remoteLoggingConfig.needsApplying());
}
Example 4
Source File: CoasterPersistentService.java From swift-k with Apache License 2.0
@SuppressWarnings({ "rawtypes", "unchecked" }) protected static Appender getAppender(Class cls) { Logger root = Logger.getRootLogger(); Enumeration e = root.getAllAppenders(); while (e.hasMoreElements()) { Appender a = (Appender) e.nextElement(); if (cls.isAssignableFrom(a.getClass())) { return a; } } return null; }
Example 5
Source File: RMStarter.java From scheduling with GNU Affero General Public License v3.0
/**
 * Wrap existing appenders configured for the root logger in AsyncAppenders
 */
public static void overrideAppenders() {
    if (PAResourceManagerProperties.LOG4J_ASYNC_APPENDER_ENABLED.getValueAsBoolean()) {
        Logger rootLogger = Logger.getRootLogger();
        Enumeration<?> en = rootLogger.getAllAppenders();
        if (en != null) {
            List<AsyncAppender> newAppenders = new ArrayList<>();
            List<String> appendersToRemove = new ArrayList<>();
            int index = 0;
            while (en.hasMoreElements()) {
                Appender app = (Appender) en.nextElement();
                if (app != null && !(app instanceof AsyncAppender)) {
                    AsyncAppender asyncAppender = new AsyncAppender();
                    asyncAppender.setName("MainAsyncAppender_" + index);
                    asyncAppender.setBufferSize(PAResourceManagerProperties.LOG4J_ASYNC_APPENDER_BUFFER_SIZE.getValueAsInt());
                    asyncAppender.addAppender(app);
                    newAppenders.add(asyncAppender);
                    appendersToRemove.add(app.getName());
                    index++;
                }
            }
            for (String appenderName : appendersToRemove) {
                rootLogger.removeAppender(appenderName);
            }
            for (Appender newAppender : newAppenders) {
                rootLogger.addAppender(newAppender);
            }
        }
    }
}
Example 6
Source File: NotEqualsRuleTest.java From otroslogviewer with Apache License 2.0
/** * getRule with "level" and "info". */ @Test public void test3() { Stack<Object> stack = new Stack<>(); stack.push("level"); stack.push("info"); Rule rule = NotEqualsRule.getRule(stack); AssertJUnit.assertEquals(0, stack.size()); LoggingEvent event = new LoggingEvent("org.apache.log4j.Logger", Logger.getRootLogger(), System.currentTimeMillis(), Level.WARN, "Hello, World", null); AssertJUnit.assertTrue(rule.evaluate(Log4jUtil.translateLog4j(event), null)); }
Example 7
Source File: PartialTextMatchRuleTest.java From otroslogviewer with Apache License 2.0
/**
 * Check PartailTextMatchRule serialization.
 */
@Test
public void test6() throws IOException, ClassNotFoundException {
    Stack<Object> stack = new Stack<>();
    stack.push("msg");
    stack.push("World");
    Rule rule = (Rule) SerializationTestHelper.serializeClone(PartialTextMatchRule.getRule(stack));
    AssertJUnit.assertEquals(0, stack.size());
    LoggingEvent event = new LoggingEvent("org.apache.log4j.Logger",
        Logger.getRootLogger(), System.currentTimeMillis(), Level.INFO, "Hello, World", null);
    AssertJUnit.assertTrue(rule.evaluate(Log4jUtil.translateLog4j(event), null));
}
Example 8
Source File: ParserXMLFileHelperTest.java From camel-language-server with Apache License 2.0
@Test
void testGetCamelComponentUri() throws Exception {
    final TestLogAppender appender = new TestLogAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    new ParserXMLFileHelper().getCamelComponentUri("uri=!!", 2);
    assertThat(appender.getLog().get(0).getMessage())
        .isEqualTo("Encountered an unsupported URI closure char !");
}
Example 9
Source File: SyslogAppenderTest.java From cacheonix-core with GNU Lesser General Public License v2.1
/**
 * Tests that append method drops messages below threshold.
 * Can't reach isSevereAsThreshold call in SyslogAppender.append
 * since it is checked in AppenderSkeleton.doAppend.
 */
public void testAppendBelowThreshold() {
    SyslogAppender appender = new SyslogAppender();
    appender.setThreshold(Level.ERROR);
    appender.activateOptions();
    Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    logger.info(
        "Should not be logged by SyslogAppenderTest.testAppendBelowThreshold.");
}
Example 10
Source File: LoggingEventTest.java From cacheonix-core with GNU Lesser General Public License v2.1
/**
 * Serialize a logging event with an exception and check it against
 * a witness.
 * @throws Exception if exception during test.
 */
public void testSerializationWithException() throws Exception {
    Logger root = Logger.getRootLogger();
    Exception ex = new Exception("Don't panic");
    LoggingEvent event = new LoggingEvent(
        root.getClass().getName(), root, Level.INFO, "Hello, world.", ex);
    // event.prepareForDeferredProcessing();
    int[] skip = new int[] { 352, 353, 354, 355, 356 };
    SerializationTestHelper.assertSerializationEquals(
        "witness/serialization/exception.bin", event, skip, 237);
}
Example 11
Source File: TestReplicationPolicy.java From hadoop with Apache License 2.0
/**
 * In this testcase, it tries to choose more targets than available nodes and
 * check the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
    // make data node 0 & 1 to be not qualified to choose: not enough disk space
    for (int i = 0; i < 2; i++) {
        updateHeartbeatWithUsage(dataNodes[i],
            2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
            (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
    }

    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);

    // try to choose NUM_OF_DATANODES which is more than actually available
    // nodes.
    DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
    assertEquals(targets.length, NUM_OF_DATANODES - 2);

    final List<LoggingEvent> log = appender.getLog();
    assertNotNull(log);
    assertFalse(log.size() == 0);
    final LoggingEvent lastLogEntry = log.get(log.size() - 1);

    assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
    // Suppose to place replicas on each node but two data nodes are not
    // available for placing replica, so here we expect a short of 2
    assertTrue(((String) lastLogEntry.getMessage()).contains("in need of 2"));

    resetHeartbeatForStorages();
}
Example 12
Source File: NotEqualsRuleTest.java From otroslogviewer with Apache License 2.0
/** * getRule with "msg". */ @Test public void test6() { Stack<Object> stack = new Stack<>(); stack.push("msg"); stack.push("Bonjour, Monde"); Rule rule = NotEqualsRule.getRule(stack); AssertJUnit.assertEquals(0, stack.size()); LoggingEvent event = new LoggingEvent("org.apache.log4j.Logger", Logger.getRootLogger(), System.currentTimeMillis(), Level.INFO, "Hello, World", null); AssertJUnit.assertTrue(rule.evaluate(Log4jUtil.translateLog4j(event), null)); }
Example 13
Source File: GridTestLog4jLogger.java From ignite with Apache License 2.0
/**
 * Creates new logger. If initialize parameter is {@code true} the Log4j
 * logger will be initialized with default console appender and {@code INFO}
 * log level.
 *
 * @param init If {@code true}, then a default console appender with
 *      following pattern layout will be created: {@code %d{ISO8601} %-5p [%c{1}] %m%n}.
 *      If {@code false}, then no implicit initialization will take place,
 *      and {@code Log4j} should be configured prior to calling this
 *      constructor.
 */
public GridTestLog4jLogger(boolean init) {
    impl = Logger.getRootLogger();

    if (init) {
        // Implementation has already been inited, passing NULL.
        addConsoleAppenderIfNeeded(Level.INFO, null);

        quiet = quiet0;
    }
    else
        quiet = true;

    cfg = null;
}
Example 14
Source File: OrRuleTest.java From otroslogviewer with Apache License 2.0
/**
 * Test Or of Level and Time.
 */
@Test
public void test3() {
    Stack<Object> stack = new Stack<>();
    stack.push(LevelEqualsRule.getRule("INFO"));
    stack.push(TimestampInequalityRule.getRule(">=", "2008-05-21 00:44:45"));
    Rule rule = OrRule.getRule(stack);
    AssertJUnit.assertEquals(0, stack.size());
    Calendar cal = new GregorianCalendar(2008, 4, 21, 0, 45, 44);
    LoggingEvent event = new LoggingEvent("org.apache.log4j.Logger",
        Logger.getRootLogger(), cal.getTimeInMillis(), Level.INFO, "Hello, World", null);
    AssertJUnit.assertTrue(rule.evaluate(Log4jUtil.translateLog4j(event), null));
}
Example 15
Source File: CacheFIFOTestCase.java From siddhi with Apache License 2.0
@Test(description = "cacheFIFOTestCase2") // using query api and 2 primary keys public void cacheFIFOTestCase2() throws InterruptedException, SQLException { final TestAppenderToValidateLogsForCachingTests appender = new TestAppenderToValidateLogsForCachingTests(); final Logger logger = Logger.getRootLogger(); logger.setLevel(Level.DEBUG); logger.addAppender(appender); SiddhiManager siddhiManager = new SiddhiManager(); String streams = "" + "define stream StockStream (symbol string, price float, volume long); " + "define stream DeleteStockStream (symbol string, price float, volume long); " + "@Store(type=\"testStoreForCacheMiss\", @Cache(size=\"2\", cache.policy=\"FIFO\"))\n" + "@PrimaryKey(\'symbol\', \'price\') " + "define table StockTable (symbol string, price float, volume long); "; String query = "" + "@info(name = 'query1') " + "from StockStream " + "insert into StockTable ;" + "" + "@info(name = 'query2') " + "from DeleteStockStream " + "delete StockTable " + " on StockTable.symbol == symbol AND StockTable.price == price AND StockTable.volume == volume;"; SiddhiAppRuntime siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(streams + query); InputHandler stockStream = siddhiAppRuntime.getInputHandler("StockStream"); InputHandler deleteStockStream = siddhiAppRuntime.getInputHandler("DeleteStockStream"); siddhiAppRuntime.start(); deleteStockStream.send(new Object[]{"WSO2", 55.6f, 1L}); deleteStockStream.send(new Object[]{"IBM", 75.6f, 2L}); stockStream.send(new Object[]{"CISCO", 75.6f, 3L}); Thread.sleep(10); stockStream.send(new Object[]{"APPLE", 75.6f, 4L}); Thread.sleep(1000); Event[] events = siddhiAppRuntime.query("" + "from StockTable " + "on symbol == \"WSO2\" AND price == 55.6f "); EventPrinter.print(events); AssertJUnit.assertEquals(1, events.length); events = siddhiAppRuntime.query("" + "from StockTable " + "on symbol == \"CISCO\" AND price == 75.6f "); EventPrinter.print(events); AssertJUnit.assertEquals(1, events.length); final List<LoggingEvent> log = appender.getLog(); List<String> logMessages = new ArrayList<>(); for (LoggingEvent logEvent : log) { String message = String.valueOf(logEvent.getMessage()); if (message.contains(":")) { message = message.split(": ")[1]; } logMessages.add(message); } Assert.assertEquals(logMessages. contains("store table size is smaller than max cache. Sending results from cache"), false); Assert.assertEquals(logMessages.contains("store table size is bigger than cache."), true); Assert.assertEquals(Collections.frequency(logMessages, "store table size is bigger than cache."), 2); Assert.assertEquals(logMessages.contains("cache constraints satisfied. Checking cache"), true); Assert.assertEquals(Collections.frequency(logMessages, "cache constraints satisfied. Checking cache"), 2); Assert.assertEquals(logMessages.contains("cache hit. Sending results from cache"), false); Assert.assertEquals(logMessages.contains("cache miss. Loading from store"), true); Assert.assertEquals(Collections.frequency(logMessages, "cache miss. Loading from store"), 2); Assert.assertEquals(logMessages.contains("store also miss. sending null"), false); Assert.assertEquals(logMessages.contains("sending results from cache after loading from store"), true); Assert.assertEquals(Collections.frequency(logMessages, "sending results from cache after loading from store"), 2); Assert.assertEquals(logMessages.contains("sending results from store"), false); siddhiAppRuntime.shutdown(); }
Example 16
Source File: CacheLFUTestCase.java From siddhi with Apache License 2.0
@Test(description = "cacheLFUTestCase1") // using query api and 1 primary key & LFU public void cacheLFUTestCase1() throws InterruptedException, SQLException { final TestAppenderToValidateLogsForCachingTests appender = new TestAppenderToValidateLogsForCachingTests(); final Logger logger = Logger.getRootLogger(); logger.setLevel(Level.DEBUG); logger.addAppender(appender); SiddhiManager siddhiManager = new SiddhiManager(); String streams = "" + "define stream StockStream (symbol string, price float, volume long); " + "define stream DeleteStockStream (symbol string, price float, volume long); " + "@Store(type=\"testStoreForCacheMiss\", @Cache(size=\"2\", cache.policy=\"LFU\"))\n" + "@PrimaryKey(\'symbol\') " + "define table StockTable (symbol string, price float, volume long); "; String query = "" + "@info(name = 'query1') " + "from StockStream " + "insert into StockTable ;" + "" + "@info(name = 'query2') " + "from DeleteStockStream " + "delete StockTable " + " on StockTable.symbol == symbol AND StockTable.price == price AND StockTable.volume == volume;"; SiddhiAppRuntime siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(streams + query); InputHandler stockStream = siddhiAppRuntime.getInputHandler("StockStream"); InputHandler deleteStockStream = siddhiAppRuntime.getInputHandler("DeleteStockStream"); siddhiAppRuntime.start(); deleteStockStream.send(new Object[]{"WSO2", 55.6f, 1L}); deleteStockStream.send(new Object[]{"IBM", 75.6f, 2L}); stockStream.send(new Object[]{"CISCO", 75.6f, 3L}); Thread.sleep(10); stockStream.send(new Object[]{"APPLE", 75.6f, 4L}); Thread.sleep(1000); Event[] events = siddhiAppRuntime.query("" + "from StockTable " + "on symbol == \"WSO2\" "); EventPrinter.print(events); AssertJUnit.assertEquals(1, events.length); final List<LoggingEvent> log = appender.getLog(); List<String> logMessages = new ArrayList<>(); for (LoggingEvent logEvent : log) { String message = String.valueOf(logEvent.getMessage()); if (message.contains(":")) { message = message.split(": ")[1]; } logMessages.add(message); } Assert.assertEquals(logMessages. contains("store table size is smaller than max cache. Sending results from cache"), false); Assert.assertEquals(logMessages.contains("store table size is bigger than cache."), true); Assert.assertEquals(Collections.frequency(logMessages, "store table size is bigger than cache."), 1); Assert.assertEquals(logMessages.contains("cache constraints satisfied. Checking cache"), true); Assert.assertEquals(Collections.frequency(logMessages, "cache constraints satisfied. Checking cache"), 1); Assert.assertEquals(logMessages.contains("cache hit. Sending results from cache"), false); Assert.assertEquals(logMessages.contains("cache miss. Loading from store"), true); Assert.assertEquals(Collections.frequency(logMessages, "cache miss. Loading from store"), 1); Assert.assertEquals(logMessages.contains("store also miss. sending null"), false); Assert.assertEquals(logMessages.contains("sending results from cache after loading from store"), true); Assert.assertEquals(Collections.frequency(logMessages, "sending results from cache after loading from store"), 1); Assert.assertEquals(logMessages.contains("sending results from store"), false); siddhiAppRuntime.shutdown(); }
Example 17
Source File: Configuration.java From epcis with Apache License 2.0
private void setLogger() {
    // Log4j Setting
    BasicConfigurator.configure();
    Logger.getRootLogger().setLevel(Level.INFO);
    Configuration.logger = Logger.getRootLogger();
}
Example 18
Source File: AkxLog4jManager.java From openzaly with Apache License 2.0
public static Level getLogLevel() {
    Logger rootLogger = Logger.getRootLogger();
    return rootLogger.getLevel();
}
Example 19
Source File: LoggerConfig.java From olca-app with Mozilla Public License 2.0
public static void setUp() {
    Logger rootLogger = Logger.getRootLogger();
    rootLogger.setLevel(Level.WARN);
    setUpOlcaLogger();
}
Example 20
Source File: CacheLRUTestCase.java From siddhi with Apache License 2.0
@Test(description = "cacheLRUTestCase1", dependsOnMethods = {"cacheLRUTestCase0"}) // using query api and 1 primary key & LRu public void cacheLRUTestCase1() throws InterruptedException, SQLException { final TestAppenderToValidateLogsForCachingTests appender = new TestAppenderToValidateLogsForCachingTests(); final Logger logger = Logger.getRootLogger(); logger.setLevel(Level.DEBUG); logger.addAppender(appender); SiddhiManager siddhiManager = new SiddhiManager(); String streams = "" + "define stream StockStream (symbol string, price float, volume long); " + "define stream DeleteStockStream (symbol string, price float, volume long); " + "@Store(type=\"testStoreForCacheMiss\", @Cache(size=\"2\", cache.policy=\"LRU\"))\n" + "@PrimaryKey(\'symbol\') " + "define table StockTable (symbol string, price float, volume long); "; String query = "" + "@info(name = 'query1') " + "from StockStream " + "insert into StockTable ;" + "" + "@info(name = 'query2') " + "from DeleteStockStream " + "delete StockTable " + " on StockTable.symbol == symbol AND StockTable.price == price AND StockTable.volume == volume;"; SiddhiAppRuntime siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(streams + query); InputHandler stockStream = siddhiAppRuntime.getInputHandler("StockStream"); InputHandler deleteStockStream = siddhiAppRuntime.getInputHandler("DeleteStockStream"); siddhiAppRuntime.start(); deleteStockStream.send(new Object[]{"WSO2", 55.6f, 1L}); deleteStockStream.send(new Object[]{"IBM", 75.6f, 2L}); stockStream.send(new Object[]{"CISCO", 75.6f, 3L}); Thread.sleep(10); stockStream.send(new Object[]{"APPLE", 75.6f, 4L}); Thread.sleep(1000); Event[] events = siddhiAppRuntime.query("" + "from StockTable " + "on symbol == \"WSO2\" "); EventPrinter.print(events); AssertJUnit.assertEquals(1, events.length); final List<LoggingEvent> log = appender.getLog(); List<String> logMessages = new ArrayList<>(); for (LoggingEvent logEvent : log) { String message = String.valueOf(logEvent.getMessage()); if (message.contains(":")) { message = message.split(": ")[1]; } logMessages.add(message); } Assert.assertEquals(logMessages. contains("store table size is smaller than max cache. Sending results from cache"), false); Assert.assertEquals(logMessages.contains("store table size is bigger than cache."), true); Assert.assertEquals(Collections.frequency(logMessages, "store table size is bigger than cache."), 1); Assert.assertEquals(logMessages.contains("cache constraints satisfied. Checking cache"), true); Assert.assertEquals(Collections.frequency(logMessages, "cache constraints satisfied. Checking cache"), 1); Assert.assertEquals(logMessages.contains("cache hit. Sending results from cache"), false); Assert.assertEquals(logMessages.contains("cache miss. Loading from store"), true); Assert.assertEquals(Collections.frequency(logMessages, "cache miss. Loading from store"), 1); Assert.assertEquals(logMessages.contains("store also miss. sending null"), false); Assert.assertEquals(logMessages.contains("sending results from cache after loading from store"), true); Assert.assertEquals(Collections.frequency(logMessages, "sending results from cache after loading from store"), 1); Assert.assertEquals(logMessages.contains("sending results from store"), false); siddhiAppRuntime.shutdown(); }