List of usage examples for java.util.logging.Logger.getLevel()
public Level getLevel()
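Before the examples, a minimal sketch (not taken from any of the source files below) of what getLevel() actually returns: the Level explicitly set on that logger, or null when the level is inherited from a parent logger. The helper name effectiveLevel and the logger name "com.example.child" are only illustrative.

import java.util.logging.Level;
import java.util.logging.Logger;

public class GetLevelSketch {

    /** Walks up the parent chain to find the level that is actually in effect. */
    static Level effectiveLevel(Logger logger) {
        for (Logger l = logger; l != null; l = l.getParent()) {
            Level level = l.getLevel();
            if (level != null) {
                return level;
            }
        }
        return null; // no level configured anywhere in the chain
    }

    public static void main(String[] args) {
        Logger child = Logger.getLogger("com.example.child");
        System.out.println(child.getLevel());      // typically null: the level is inherited
        System.out.println(effectiveLevel(child)); // usually INFO, inherited from the root logger
    }
}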
From source file:com.ellychou.todo.rest.service.SpringContextJerseyTest.java
/**
 * Register {@link Handler log handler} to the list of root loggers.
 */
private void registerLogHandler() {
    final String recordLogLevel = getProperty(TestProperties.RECORD_LOG_LEVEL);
    final int recordLogLevelInt = Integer.valueOf(recordLogLevel);
    final Level level = Level.parse(recordLogLevel);

    logLevelMap.clear();

    for (final Logger root : getRootLoggers()) {
        logLevelMap.put(root, root.getLevel());

        if (root.getLevel().intValue() > recordLogLevelInt) {
            root.setLevel(level);
        }

        root.addHandler(getLogHandler());
    }
}
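One caveat in the snippet above: root.getLevel().intValue() assumes every logger returned by getRootLoggers() has an explicit level. A null-tolerant variant of that loop might look like the sketch below (getRootLoggers(), getLogHandler(), logLevelMap, level and recordLogLevelInt are taken from the excerpt; the null handling is an addition, not part of the original source):

for (final Logger root : getRootLoggers()) {
    final Level current = root.getLevel();   // may be null if the level is inherited
    logLevelMap.put(root, current);
    if (current == null || current.intValue() > recordLogLevelInt) {
        root.setLevel(level);
    }
    root.addHandler(getLogHandler());
}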
From source file:name.livitski.databag.cli.Launcher.java
public Level getLogLevel() {
    Logger root = Logger.getLogger("");
    Level level = root.getLevel();
    return level;
}
From source file:com.yahoo.dba.perf.myperf.common.MyPerfContext.java
private void configureLogging() {
    Logger logger = Logger.getLogger("");
    try {
        logger.setLevel(Level.parse(getLogLevel()));
    } catch (Exception ex) {
        logger.setLevel(Level.INFO);
    }
    try {
        for (Handler h : logger.getHandlers()) {
            if (h instanceof java.util.logging.ConsoleHandler)
                h.setLevel(Level.SEVERE);
        }
        String logRoot = System.getProperty("logPath", ".");
        java.util.logging.FileHandler fileHandler = new java.util.logging.FileHandler(
                logRoot + File.separatorChar + getLogPath(), this.logFileSize, this.logFileCount);
        fileHandler.setLevel(logger.getLevel());
        fileHandler.setFormatter(new SimpleFormatter());
        logger.addHandler(fileHandler);
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
From source file:org.apache.sling.commons.log.logback.integration.ITJULIntegration.java
/**
 * Checks the default settings. It runs the bundle with minimum dependencies.
 */
@Test
public void testJULLogging() throws Exception {
    java.util.logging.Logger julLogger = java.util.logging.Logger.getLogger("foo.jul.1");
    org.slf4j.Logger slf4jLogger = LoggerFactory.getLogger("foo.jul.1");

    assertEquals(java.util.logging.Level.FINEST, julLogger.getLevel());
    assertTrue(slf4jLogger.isTraceEnabled());

    // Now add an appender and see if JUL logs are handled
    TestAppender ta = new TestAppender();
    Dictionary<String, Object> props = new Hashtable<String, Object>();

    String[] loggers = { "foo.jul.1", };
    ch.qos.logback.classic.Logger bar = (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(loggers[0]);
    bar.setLevel(Level.INFO);

    props.put("loggers", loggers);
    ServiceRegistration sr = bundleContext.registerService(Appender.class.getName(), ta, props);

    delay();

    // Level should be INFO now
    assertEquals(java.util.logging.Level.INFO, julLogger.getLevel());

    julLogger.info("Info message");
    julLogger.fine("Fine message");

    assertEquals(1, ta.events.size());
}
From source file:org.apache.sling.extensions.logback.integration.ITJULIntegration.java
/**
 * Checks the default settings. It runs the bundle with minimum dependencies.
 */
@Test
public void testJULLogging() throws Exception {
    java.util.logging.Logger julLogger = java.util.logging.Logger.getLogger("foo.jul.1");
    org.slf4j.Logger slf4jLogger = LoggerFactory.getLogger("foo.jul.1");

    assertEquals(java.util.logging.Level.FINEST, julLogger.getLevel());
    assertTrue(slf4jLogger.isTraceEnabled());

    // Now add an appender and see if JUL logs are handled
    TestAppender ta = new TestAppender();
    Dictionary<String, Object> props = new Hashtable<String, Object>();

    String[] loggers = { "foo.jul.1:INFO", };
    props.put("loggers", loggers);

    ServiceRegistration sr = bundleContext.registerService(Appender.class.getName(), ta, props);

    delay();

    // Level should be INFO now
    assertEquals(java.util.logging.Level.INFO, julLogger.getLevel());

    julLogger.info("Info message");
    julLogger.fine("Fine message");

    assertEquals(1, ta.events.size());
}
From source file:org.archive.io.Arc2Warc.java
protected void transform(final ARCReader reader, final File warc) throws IOException {
    WARCWriter writer = null;

    // No point digesting. Digest is available after reading of ARC which
    // is too late for inclusion in WARC.
    reader.setDigest(false);

    try {
        BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(warc));

        // Get the body of the first ARC record as a String so can dump it
        // into first record of WARC.
        final Iterator<ArchiveRecord> i = reader.iterator();
        ARCRecord firstRecord = (ARCRecord) i.next();
        ByteArrayOutputStream baos = new ByteArrayOutputStream((int) firstRecord.getHeader().getLength());
        firstRecord.dump(baos);

        // Add ARC first record content as an ANVLRecord.
        ANVLRecord ar = new ANVLRecord();
        ar.addLabelValue("Filedesc", baos.toString());
        List<String> metadata = new ArrayList<String>(1);
        metadata.add(ar.toString());

        // Now create the writer. If reader was compressed, lets write
        // a compressed WARC.
        writer = new WARCWriter(new AtomicInteger(), bos, warc, new WARCWriterPoolSettingsData("", "", -1,
                reader.isCompressed(), null, metadata, generator));

        // Write a warcinfo record with description about how this WARC
        // was made.
        writer.writeWarcinfoRecord(warc.getName(), "Made from " + reader.getReaderIdentifier() + " by "
                + this.getClass().getName() + "/" + getRevision());

        for (; i.hasNext();) {
            write(writer, (ARCRecord) i.next());
        }
    } finally {
        if (reader != null) {
            reader.close();
        }
        if (writer != null) {
            // I don't want the close being logged -- least, not w/o log of
            // an opening (and that'd be a little silly for simple script
            // like this). Currently, it logs at level INFO so that close
            // of files gets written to log files. Up the log level just
            // for the close.
            Logger l = Logger.getLogger(writer.getClass().getName());
            Level oldLevel = l.getLevel();
            l.setLevel(Level.WARNING);
            try {
                writer.close();
            } finally {
                l.setLevel(oldLevel);
            }
        }
    }
}
From source file:org.archive.io.Warc2Arc.java
protected void transform(final WARCReader reader, final ARCWriter writer)
        throws IOException, java.text.ParseException {
    // No point digesting. Digest is available after reading of ARC which
    // is too late for inclusion in WARC.
    reader.setDigest(false);

    // I don't want the close being logged -- least, not w/o log of
    // an opening (and that'd be a little silly for simple script
    // like this). Currently, it logs at level INFO so that close
    // of files gets written to log files. Up the log level just
    // for the close.
    Logger l = Logger.getLogger(writer.getClass().getName());
    Level oldLevel = l.getLevel();
    try {
        l.setLevel(Level.WARNING);
        for (final Iterator<ArchiveRecord> i = reader.iterator(); i.hasNext();) {
            WARCRecord r = (WARCRecord) i.next();
            if (!isARCType(r.getHeader().getMimetype())) {
                continue;
            }
            if (r.getHeader().getContentBegin() <= 0) {
                // Otherwise, because length include Header-Line and
                // Named Fields, these will end up in the ARC unless there
                // is a non-zero content begin.
                continue;
            }
            String ip = (String) r.getHeader().getHeaderValue((WARCConstants.HEADER_KEY_IP));
            long length = r.getHeader().getLength();
            int offset = r.getHeader().getContentBegin();
            // This mimetype is not exactly what you'd expect to find in
            // an ARC though technically its 'correct'. To get right one,
            // need to parse the HTTP Headers. Thats messy. Not doing for
            // now.
            String mimetype = r.getHeader().getMimetype();
            // Clean out ISO time string '-', 'T', ':', and 'Z' characters.
            String t = r.getHeader().getDate().replaceAll("[-T:Z]", "");
            long time = ArchiveUtils.getSecondsSinceEpoch(t).getTime();
            writer.write(r.getHeader().getUrl(), mimetype, ip, time, (int) (length - offset), r);
        }
    } finally {
        if (reader != null) {
            reader.close();
        }
        if (writer != null) {
            try {
                writer.close();
            } finally {
                l.setLevel(oldLevel);
            }
        }
    }
}
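The Arc2Warc and Warc2Arc transforms above share the same save/raise/restore idiom around getLevel() and setLevel(). A condensed sketch of just that idiom, assuming the WARCWriter type from the snippets (the quietClose helper name is hypothetical); note that if getLevel() returns null because the level is inherited, passing that null back to setLevel() at the end simply restores inheritance:

static void quietClose(WARCWriter writer) throws IOException {
    Logger l = Logger.getLogger(writer.getClass().getName());
    Level oldLevel = l.getLevel();   // may be null if the level is inherited
    l.setLevel(Level.WARNING);       // suppress the INFO-level "close" message
    try {
        writer.close();
    } finally {
        l.setLevel(oldLevel);        // restore the previous (possibly inherited) level
    }
}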
From source file:org.cloudifysource.esc.shell.commands.BootstrapCloud.java
private void limitLoggingLevel() {
    if (!this.verbose) {
        loggerStates.clear();
        for (final String loggerName : NON_VERBOSE_LOGGERS) {
            final Logger provisioningLogger = Logger.getLogger(loggerName);
            final Level logLevelBefore = provisioningLogger.getLevel();
            provisioningLogger.setLevel(Level.WARNING);
            loggerStates.put(loggerName, logLevelBefore);
        }
    }
}
From source file:org.cloudifysource.esc.shell.commands.TeardownCloud.java
private void limitLoggingLevel() {
    if (!this.verbose) {
        loggerStates.clear();
        for (String loggerName : NON_VERBOSE_LOGGERS) {
            Logger provisioningLogger = Logger.getLogger(loggerName);
            Level logLevelBefore = provisioningLogger.getLevel();
            provisioningLogger.setLevel(Level.WARNING);
            loggerStates.put(loggerName, logLevelBefore);
        }
    }
}
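Both the BootstrapCloud and TeardownCloud commands above record the previous level, which may be null, in loggerStates before lowering it to WARNING. Neither excerpt shows the matching restore step; a sketch of what it presumably looks like (the restoreLoggingLevel name and the Map<String, Level> field type are assumptions, not part of the excerpts):

private void restoreLoggingLevel() {
    // Put back whatever getLevel() returned earlier; a null value simply
    // re-enables level inheritance from the parent logger.
    for (Map.Entry<String, Level> entry : loggerStates.entrySet()) {
        Logger.getLogger(entry.getKey()).setLevel(entry.getValue());
    }
    loggerStates.clear();
}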
From source file:org.ebayopensource.turmeric.eclipse.core.logging.SOALogger.java
/**
 * If the tracing is enabled, then the logging level will be setup accordingly.
 *
 * @param clazz the clazz
 * @return An instance of <code>LogManager</code>
 */
public static synchronized SOALogger getLogger(String clazz) {
    java.util.logging.LogManager manager = java.util.logging.LogManager.getLogManager();
    Logger result = manager.getLogger(clazz);
    if (result instanceof SOALogger) {
        return (SOALogger) result;
    } else if (result != null) { // there is an existing logger instance
        if (loggers.keySet().contains(clazz)) {
            return loggers.get(clazz);
        }
        SOALogger logger = new SOALogger(clazz);
        logger.setLevel(result.getLevel());
        logger.setFilter(result.getFilter());
        logger.setParent(result.getParent());
        logger.setUseParentHandlers(logger.getUseParentHandlers());
        loggers.put(clazz, logger);
        return logger;
    } else { // can not find a logger, so let's create one.
        result = new SOALogger(clazz);
    }
    manager.addLogger(result);
    SOALogger logger = (SOALogger) manager.getLogger(clazz);
    try {
        ISOALoggingSystemProvider logSystemProvider = SOALoggingSystemExtensionRegistry.getInstance()
                .getLoggingSystemIDProvider(PluginLogDelegateHandler.getBuildSystemName());
        if (logSystemProvider != null) {
            logSystemProvider.newLoggerCreated(logger);
        }
    } catch (Exception e) {
        // ignore the issue
        e.printStackTrace();
    }
    return logger;
}