List of usage examples for java.lang.management.ManagementFactory.getPlatformMBeanServer()
public static synchronized MBeanServer getPlatformMBeanServer()
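Before the project examples below, a minimal self-contained sketch of the basic pattern: obtain the platform MBean server, register a standard MBean under an ObjectName, read an attribute back, and unregister it. The CounterMBean/Counter types and the com.example domain are illustrative assumptions, not taken from any of the projects below.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class PlatformMBeanServerExample {

    // Standard MBean interface: by JMX convention the implementation class name plus "MBean".
    public interface CounterMBean {
        long getCount();
        void increment();
    }

    public static class Counter implements CounterMBean {
        private long count;
        public synchronized long getCount() { return count; }
        public synchronized void increment() { count++; }
    }

    public static void main(String[] args) throws Exception {
        // JVM-wide singleton; repeated calls return the same MBeanServer instance.
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();

        ObjectName name = new ObjectName("com.example:type=Counter");
        Counter counter = new Counter();
        mbs.registerMBean(counter, name);

        counter.increment();
        // Read the attribute back through the MBean server, as a remote JMX client would.
        System.out.println(mbs.getAttribute(name, "Count")); // prints 1

        mbs.unregisterMBean(name);
    }
}

The platform MBean server is created lazily on the first call and shared for the lifetime of the JVM, which is why the examples below simply call ManagementFactory.getPlatformMBeanServer() wherever they need it rather than caching a server of their own.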
From source file: org.sakaiproject.kernel.component.core.SharedClassLoaderContainer.java

/**
 * Create a shared classloader object.
 *
 * @param kernel
 *          the kernel to connect to.
 * @param shutdownService
 *          the shutdown service.
 * @throws JMRuntimeException
 * @throws JMException
 * @throws InvalidTargetObjectTypeException
 */
@Inject
public SharedClassLoaderContainer(Kernel kernel, ShutdownService shutdownService, SharedClassLoader classLoader)
        throws JMRuntimeException, JMException, InvalidTargetObjectTypeException {
    this.sharedClassLoader = classLoader;
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    RequiredModelMBean model = new RequiredModelMBean(createMBeanInfo());
    model.setManagedResource(this, "objectReference");
    ObjectName common = new ObjectName(CommonObject.MBEAN_COMMON + ".sharedclassloader");
    mbs.registerMBean(model, common);
    // Explicitly register this container to be shut down. We don't want this
    // container exposed as a service, so we register directly. Within the
    // same module, this approach should be taken to give the IoC container
    // a dependency graph.
    shutdownService.register(this);
}
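The constructor above feeds the result of a private createMBeanInfo() helper to RequiredModelMBean, but that helper is not part of the snippet. A hedged sketch of what such a method might return, built with ModelMBeanInfoSupport; the "stop" operation, the class, and the descriptions are assumptions for illustration, not the Sakai implementation.

import javax.management.MBeanParameterInfo;
import javax.management.modelmbean.*;

// Hypothetical stand-in for the Sakai helper; not the real implementation.
class SharedClassLoaderMBeanInfoSketch {
    static ModelMBeanInfo createMBeanInfo() {
        // Describe one management operation; no attributes, constructors, or notifications.
        ModelMBeanOperationInfo stop = new ModelMBeanOperationInfo("stop",
                "Stop the shared classloader container", new MBeanParameterInfo[0],
                Void.TYPE.getName(), ModelMBeanOperationInfo.ACTION);
        return new ModelMBeanInfoSupport("SharedClassLoaderContainer",
                "Shared classloader container", new ModelMBeanAttributeInfo[0],
                new ModelMBeanConstructorInfo[0], new ModelMBeanOperationInfo[] { stop },
                new ModelMBeanNotificationInfo[0]);
    }
}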
From source file: org.apache.hadoop.hdfs.server.namenode.ha.TestHAMetrics.java

@Test(timeout = 300000)
public void testHAMetrics() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, Integer.MAX_VALUE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleHATopology())
            .numDataNodes(1).build();
    FileSystem fs = null;
    try {
        cluster.waitActive();

        FSNamesystem nn0 = cluster.getNamesystem(0);
        FSNamesystem nn1 = cluster.getNamesystem(1);

        assertEquals(nn0.getHAState(), "standby");
        assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
        assertEquals(nn1.getHAState(), "standby");
        assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());

        cluster.transitionToActive(0);
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        final ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeStatus");
        final Long ltt1 = (Long) mbs.getAttribute(mxbeanName, "LastHATransitionTime");
        assertTrue("lastHATransitionTime should be > 0", ltt1 > 0);

        assertEquals("active", nn0.getHAState());
        assertEquals(0, nn0.getMillisSinceLastLoadedEdits());
        assertEquals("standby", nn1.getHAState());
        assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());

        cluster.transitionToStandby(0);
        final Long ltt2 = (Long) mbs.getAttribute(mxbeanName, "LastHATransitionTime");
        assertTrue("lastHATransitionTime should be > " + ltt1, ltt2 > ltt1);

        cluster.transitionToActive(1);
        assertEquals("standby", nn0.getHAState());
        assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
        assertEquals("active", nn1.getHAState());
        assertEquals(0, nn1.getMillisSinceLastLoadedEdits());

        Thread.sleep(2000); // make sure standby gets a little out-of-date
        assertTrue(2000 <= nn0.getMillisSinceLastLoadedEdits());

        assertEquals(0, nn0.getPendingDataNodeMessageCount());
        assertEquals(0, nn1.getPendingDataNodeMessageCount());

        fs = HATestUtil.configureFailoverFs(cluster, conf);
        DFSTestUtil.createFile(fs, new Path("/foo"), 10, (short) 1, 1L);

        assertTrue(0 < nn0.getPendingDataNodeMessageCount());
        assertEquals(0, nn1.getPendingDataNodeMessageCount());
        long millisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();

        HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(1), cluster.getNameNode(0));

        assertEquals(0, nn0.getPendingDataNodeMessageCount());
        assertEquals(0, nn1.getPendingDataNodeMessageCount());
        long newMillisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();
        // Since we just waited for the standby to catch up, the time since we
        // last loaded edits should be very low.
        assertTrue("expected " + millisSinceLastLoadedEdits + " > " + newMillisSinceLastLoadedEdits,
                millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits);
    } finally {
        IOUtils.cleanup(LOG, fs);
        cluster.shutdown();
    }
}
From source file: org.rhq.core.pc.PluginContainerMBeanImpl.java

public void unregister() {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    try {
        server.unregisterMBean(new ObjectName(OBJECT_NAME));
    } catch (Exception e) {
        log.warn("Unable to unregister PluginContainerMBean", e);
    }
}
From source file: uk.co.gidley.jmxmonitor.services.InternalJmx.java

public InternalJmx(MainConfiguration configuration, RegistryShutdownHub registryShutdownHub)
        throws InitialisationException, MalformedObjectNameException {
    // TODO make this a symbol
    this.PROPERTY_PREFIX = ThreadManager.PROPERTY_PREFIX;
    registryShutdownHub.addRegistryShutdownListener(this);
    connectorServerName = ObjectName.getInstance("connectors:protocol=rmi");
    MBEAN_SERVER = ManagementFactory.getPlatformMBeanServer();
    start(configuration.getConfiguration());
}
From source file: net.gcolin.simplerepo.test.AbstractRepoTest.java

protected Object executeOperationJmx(String name, String operation, Object[] arguments, String[] signature)
        throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName oname = server.queryNames(new ObjectName(name), null).iterator().next();
    return server.invoke(oname, operation, arguments, signature);
}
From source file: org.wso2.carbon.ndatasource.rdbms.RDBMSDataSource.java

private void registerMBean() {
    MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
    String mBean = "";
    try {
        if (DataSourceUtils.getCurrentDataSourceId() == null) {
            if (log.isDebugEnabled()) {
                log.debug("The current dataSource id is not set");
            }
            return;
        }
        String[] dataSourceId = DataSourceUtils.getCurrentDataSourceId().split(":");
        mBean = dataSourceId[1] + "," + dataSourceId[0];
        ObjectName objectName = new ObjectName(mBean + ":type=DataSource");
        mBeanServer.registerMBean(this.dataSource.createPool().getJmxPool(), objectName);
    } catch (InstanceAlreadyExistsException e) {
        // Ignore, as an MBean for the same datasource name already exists.
    } catch (MalformedObjectNameException e) {
        log.error("Error while registering the MBean for dataSource '" + mBean + " " + e.getMessage(), e);
    } catch (NotCompliantMBeanException e) {
        log.error("Error while registering the MBean for dataSource '" + mBean + " " + e.getMessage(), e);
    } catch (SQLException e) {
        log.error("Error while registering the MBean for dataSource '" + mBean + " " + e.getMessage(), e);
    } catch (MBeanRegistrationException e) {
        log.error("Error while registering the MBean for dataSource '" + mBean + " " + e.getMessage(), e);
    }
}
From source file: org.opendaylight.controller.config.yang.logback.config.LogbackModuleWithInitialConfigurationTest.java

/**
 * Tests that initial configuration was changed. Changed attributes:
 * location, fileName, duplicateInsertTries. Added new FileAppender.
 */
@Test
public void test() throws Exception {
    createBeans();

    ConfigTransactionClient transaction = configRegistryClient.createTransaction();

    LogbackModuleMXBean bean = JMX.newMXBeanProxy(ManagementFactory.getPlatformMBeanServer(),
            transaction.lookupConfigBean("logback", "singleton"), LogbackModuleMXBean.class);
    assertEquals(1, bean.getConsoleAppenderTO().size());
    assertEquals(1, bean.getRollingFileAppenderTO().size());
    assertEquals(0, bean.getFileAppenderTO().size());
    assertEquals(1, bean.getLoggerTO().size());

    RollingFileAppenderTO rolling = new RollingFileAppenderTO();
    RollingFileAppenderTO old = bean.getRollingFileAppenderTO().get(0);
    rolling.setAppend(old.getAppend());
    rolling.setEncoderPattern(old.getEncoderPattern());
    rolling.setRollingPolicyType(old.getRollingPolicyType());
    rolling.setFileName("target/logFile1.log");
    rolling.setFileNamePattern("target/%i.log");
    rolling.setMaxFileSize(old.getMaxFileSize());
    rolling.setMinIndex(old.getMinIndex());
    rolling.setMaxIndex(old.getMaxIndex());
    rolling.setName("FILE");

    ConsoleAppenderTO console = new ConsoleAppenderTO();
    console.setEncoderPattern("%date %level [%thread] %logger{10} %msg%n");
    console.setName("SYSTEM");
    console.setThresholdFilter("DEBUG");

    FileAppenderTO file = new FileAppenderTO();
    file.setName("FILE_APPENDER");
    file.setAppend(true);
    file.setEncoderPattern("%-4relative [%thread] %-5level %logger{35} - %msg%n");
    file.setFileName("target/testFile.log");

    bean.setConsoleAppenderTO(Lists.newArrayList(console));
    bean.setRollingFileAppenderTO(Lists.newArrayList(rolling));
    bean.setFileAppenderTO(Lists.newArrayList(file));

    LoggerTO logger = new LoggerTO();
    logger.setLevel("INFO");
    logger.setLoggerName("logger");
    logger.setAppenders(Lists.newArrayList("SYSTEM"));

    LoggerTO fileLogger = new LoggerTO();
    fileLogger.setLevel("DEBUG");
    fileLogger.setLoggerName("fileLogger");
    fileLogger.setAppenders(Lists.newArrayList("FILE_APPENDER"));

    List<LoggerTO> loggers = Lists.newArrayList(logger, fileLogger);
    bean.setLoggerTO(loggers);

    transaction.commit();

    LogbackModuleMXBean logback = configRegistryClient.newMXBeanProxy(
            ObjectNameUtil.createReadOnlyModuleON("logback", "singleton"), LogbackModuleMXBean.class);

    List<RollingFileAppenderTO> rollingList = logback.getRollingFileAppenderTO();
    assertEquals(1, rollingList.size());

    RollingFileAppenderTO rollingApp = rollingList.get(0);
    assertEquals(rollingApp.getFileName(), "target/logFile1.log");
    assertEquals(rollingApp.getName(), "FILE");

    List<ConsoleAppenderTO> consoleList = logback.getConsoleAppenderTO();
    assertEquals(1, consoleList.size());

    ConsoleAppenderTO consoleApp = consoleList.get(0);
    assertEquals(consoleApp.getThresholdFilter(), "DEBUG");
    assertEquals(consoleApp.getName(), "SYSTEM");

    List<FileAppenderTO> fileList = logback.getFileAppenderTO();
    assertEquals(1, fileList.size());

    FileAppenderTO fileApp = fileList.get(0);
    assertEquals(fileApp.getFileName(), "target/testFile.log");
    assertEquals(fileApp.getName(), "FILE_APPENDER");

    loggers = logback.getLoggerTO();
    assertEquals(2, loggers.size());
    assertEquals("logger", loggers.get(0).getLoggerName());
    assertEquals("fileLogger", loggers.get(1).getLoggerName());
}
From source file: org.apache.streams.monitoring.tasks.BroadcastMonitorThread.java

public BroadcastMonitorThread(Map<String, Object> streamConfig) {
    keepRunning = true;
    this.streamConfig = streamConfig;
    LOGGER.info("BroadcastMonitorThread starting" + streamConfig);

    server = ManagementFactory.getPlatformMBeanServer();

    setBroadcastURI();
    setWaitTime();

    if (broadcastURI != null) {
        if (broadcastURI.getScheme().equals("http")) {
            messagePersister = new BroadcastMessagePersister(broadcastURI.toString());
        } else if (broadcastURI.getScheme().equals("udp")) {
            messagePersister = new LogstashUdpMessagePersister(broadcastURI.toString());
        } else {
            LOGGER.error("You need to specify a broadcast URI with either a HTTP or UDP protocol defined.");
            throw new RuntimeException();
        }
    } else {
        messagePersister = new SLF4JMessagePersister();
    }

    initializeObjectMapper();

    LOGGER.info("BroadcastMonitorThread started");
}
From source file: org.apache.hadoop.hdfs.server.common.MetricsLoggerTask.java

/**
 * Write metrics to the metrics appender when invoked.
 */
@Override
public void run() {
    // Skip querying metrics if there are no known appenders.
    if (!metricsLog.isInfoEnabled() || !hasAppenders(metricsLog) || objectName == null) {
        return;
    }

    metricsLog.info(" >> Begin " + nodeName + " metrics dump");
    final MBeanServer server = ManagementFactory.getPlatformMBeanServer();

    // Iterate over each MBean.
    for (final ObjectName mbeanName : server.queryNames(objectName, null)) {
        try {
            MBeanInfo mBeanInfo = server.getMBeanInfo(mbeanName);
            final String mBeanNameName = MBeans.getMbeanNameName(mbeanName);
            final Set<String> attributeNames = getFilteredAttributes(mBeanInfo);

            final AttributeList attributes = server.getAttributes(mbeanName,
                    attributeNames.toArray(new String[attributeNames.size()]));

            for (Object o : attributes) {
                final Attribute attribute = (Attribute) o;
                final Object value = attribute.getValue();
                final String valueStr = (value != null) ? value.toString() : "null";
                // Truncate the value if it is too long
                metricsLog.info(mBeanNameName + ":" + attribute.getName() + "=" + trimLine(valueStr));
            }
        } catch (Exception e) {
            metricsLog.error("Failed to get " + nodeName + " metrics for mbean " + mbeanName.toString(), e);
        }
    }
    metricsLog.info(" << End " + nodeName + " metrics dump");
}
From source file: org.jmxtrans.embedded.util.pool.ManagedGenericKeyedObjectPoolTest.java

@Test
public void testMbeanAttributeAccess() throws Exception {
    BaseKeyedPooledObjectFactory<String, String> factory = new BaseKeyedPooledObjectFactory<String, String>() {
        @Override
        public String create(String key) throws Exception {
            return key;
        }

        @Override
        public PooledObject<String> wrap(String value) {
            return new DefaultPooledObject<String>(value);
        }
    };
    GenericKeyedObjectPoolConfig config = new GenericKeyedObjectPoolConfig();
    config.setJmxNameBase("org.jmxtrans.embedded:type=GenericKeyedObjectPool,writer=MyWriter,name=");
    config.setJmxNamePrefix("my-host_1234");
    GenericKeyedObjectPool<String, String> objectPool = new GenericKeyedObjectPool<String, String>(factory, config);

    ObjectName objectName = new ObjectName(
            "org.jmxtrans.embedded:type=GenericKeyedObjectPool,writer=MyWriter,name=my-host_1234");
    MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
    try {
        Object numIdle = mbeanServer.getAttribute(objectName, "NumIdle");
        assertThat(numIdle, instanceOf(Number.class));
    } finally {
        mbeanServer.unregisterMBean(objectName);
    }
}