List of usage examples for javax.management MBeanServer getAttribute
public Object getAttribute(ObjectName name, String attribute) throws MBeanException, AttributeNotFoundException, InstanceNotFoundException, ReflectionException;
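Before the examples, a minimal self-contained sketch of the call: it reads the "Uptime" attribute of the standard java.lang:type=Runtime platform MXBean from the in-process platform MBean server. The bean name and attribute are part of the standard JDK management interface; only the demo class name is illustrative.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class GetAttributeDemo {
    public static void main(String[] args) throws Exception {
        // The platform MBean server is always available in-process.
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();

        // java.lang:type=Runtime is a standard platform MXBean;
        // its "Uptime" attribute is the JVM uptime in milliseconds.
        ObjectName runtime = new ObjectName("java.lang:type=Runtime");
        Long uptime = (Long) mbs.getAttribute(runtime, "Uptime");
        System.out.println("JVM uptime (ms): " + uptime);
    }
}

getAttribute returns Object, so callers cast to the attribute's declared type, as every example below does.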
From source file:com.atolcd.alfresco.audit.web.scripts.ContentStoreInfoGet.java
@Override
protected Map<String, Object> executeImpl(WebScriptRequest req, Status status, Cache cache) {
    try {
        // Map that will be passed to the template
        Map<String, Object> model = new HashMap<String, Object>();
        ObjectName query = new ObjectName("Alfresco:Name=ContentStore,Type=*,Root=*");
        MBeanServer mbs = getMBeanServerWithQuery(query);
        Set<ObjectName> storesName = mbs.queryNames(query, null);
        List<ContentStore> contentStores = new ArrayList<ContentStore>();
        for (ObjectName storeName : storesName) {
            Object writeSupported = mbs.getAttribute(storeName, "WriteSupported");
            Object spaceUsed = mbs.getAttribute(storeName, "SpaceUsed");
            contentStores.add(new ContentStore(storeName.getKeyProperty("Root"),
                    String.valueOf(writeSupported), String.valueOf(spaceUsed)));
        }
        model.put("contentStores", contentStores);
        return model;
    } catch (Exception e) {
        // Preserve the cause so the failure is diagnosable
        throw new WebScriptException("[ContentStoreInfoGet] Error in executeImpl function", e);
    }
}
From source file:org.apache.hadoop.hdfs.server.namenode.ha.TestHAMetrics.java
@Test(timeout = 300000)
public void testHAMetrics() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, Integer.MAX_VALUE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        FSNamesystem nn0 = cluster.getNamesystem(0);
        FSNamesystem nn1 = cluster.getNamesystem(1);
        assertEquals(nn0.getHAState(), "standby");
        assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
        assertEquals(nn1.getHAState(), "standby");
        assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());

        cluster.transitionToActive(0);
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        final ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeStatus");
        final Long ltt1 = (Long) mbs.getAttribute(mxbeanName, "LastHATransitionTime");
        assertTrue("lastHATransitionTime should be > 0", ltt1 > 0);

        assertEquals("active", nn0.getHAState());
        assertEquals(0, nn0.getMillisSinceLastLoadedEdits());
        assertEquals("standby", nn1.getHAState());
        assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());

        cluster.transitionToStandby(0);
        final Long ltt2 = (Long) mbs.getAttribute(mxbeanName, "LastHATransitionTime");
        assertTrue("lastHATransitionTime should be > " + ltt1, ltt2 > ltt1);

        cluster.transitionToActive(1);
        assertEquals("standby", nn0.getHAState());
        assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
        assertEquals("active", nn1.getHAState());
        assertEquals(0, nn1.getMillisSinceLastLoadedEdits());

        Thread.sleep(2000); // make sure standby gets a little out-of-date
        assertTrue(2000 <= nn0.getMillisSinceLastLoadedEdits());

        assertEquals(0, nn0.getPendingDataNodeMessageCount());
        assertEquals(0, nn1.getPendingDataNodeMessageCount());

        fs = HATestUtil.configureFailoverFs(cluster, conf);
        DFSTestUtil.createFile(fs, new Path("/foo"), 10, (short) 1, 1L);

        assertTrue(0 < nn0.getPendingDataNodeMessageCount());
        assertEquals(0, nn1.getPendingDataNodeMessageCount());
        long millisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();

        HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(1), cluster.getNameNode(0));

        assertEquals(0, nn0.getPendingDataNodeMessageCount());
        assertEquals(0, nn1.getPendingDataNodeMessageCount());
        long newMillisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();
        // Since we just waited for the standby to catch up, the time since we
        // last loaded edits should be very low.
        assertTrue("expected " + millisSinceLastLoadedEdits + " > " + newMillisSinceLastLoadedEdits,
                millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits);
    } finally {
        IOUtils.cleanup(LOG, fs);
        cluster.shutdown();
    }
}
From source file:com.mtgi.analytics.jmx.StatisticsMBeanEventPersisterTest.java
private void waitForCount(ObjectName id, int count) throws JMException, InterruptedException {
    MBeanServer jmx = JmxUtils.locateMBeanServer();
    long start = System.currentTimeMillis();
    int actual = -1;
    do {
        if (jmx.isRegistered(id))
            actual = ((Number) jmx.getAttribute(id, "Count")).intValue();
        if (actual < count)
            Thread.sleep(10);
    } while (actual < count && (System.currentTimeMillis() - start) < 300000);
    // assertEquals(message, expected, actual): the expected count comes first
    assertEquals("events received for " + id, count, actual);
}
From source file:net.testdriven.psiprobe.controllers.threads.ThreadStackController.java
protected ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response)
        throws Exception {
    long threadID = ServletRequestUtils.getLongParameter(request, "id", -1);
    String threadName = ServletRequestUtils.getStringParameter(request, "name", null);
    List<ThreadStackElement> stack = null;

    MBeanServer mBeanServer = new Registry().getMBeanServer();
    ObjectName threadingOName = new ObjectName("java.lang:type=Threading");

    if (threadID == -1 && threadName != null) {
        // find thread by name
        long[] allIds = (long[]) mBeanServer.getAttribute(threadingOName, "AllThreadIds");
        for (long allId : allIds) {
            CompositeData cd = (CompositeData) mBeanServer.invoke(threadingOName, "getThreadInfo",
                    new Object[] { allId }, new String[] { "long" });
            String name = JmxTools.getStringAttr(cd, "threadName");
            if (threadName.equals(name)) {
                threadID = allId;
                break;
            }
        }
    }

    if (mBeanServer.queryMBeans(threadingOName, null) != null && threadID != -1) {
        CompositeData cd = (CompositeData) mBeanServer.invoke(threadingOName, "getThreadInfo",
                new Object[] { threadID, stackElementCount }, new String[] { "long", "int" });
        if (cd != null) {
            CompositeData[] elements = (CompositeData[]) cd.get("stackTrace");
            threadName = JmxTools.getStringAttr(cd, "threadName");

            stack = new ArrayList<>(elements.length);
            for (CompositeData cd2 : elements) {
                ThreadStackElement tse = new ThreadStackElement();
                tse.setClassName(JmxTools.getStringAttr(cd2, "className"));
                tse.setFileName(JmxTools.getStringAttr(cd2, "fileName"));
                tse.setMethodName(JmxTools.getStringAttr(cd2, "methodName"));
                tse.setLineNumber(JmxTools.getIntAttr(cd2, "lineNumber", -1));
                tse.setNativeMethod(JmxTools.getBooleanAttr(cd2, "nativeMethod"));
                stack.add(tse);
            }
        }
    }
    return new ModelAndView(getViewName(), "stack", stack).addObject("threadName", threadName);
}
From source file:com.googlecode.psiprobe.controllers.threads.ThreadStackController.java
protected ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response)
        throws Exception {
    long threadID = ServletRequestUtils.getLongParameter(request, "id", -1);
    String threadName = ServletRequestUtils.getStringParameter(request, "name", null);
    List stack = null;

    MBeanServer mBeanServer = new Registry().getMBeanServer();
    ObjectName threadingOName = new ObjectName("java.lang:type=Threading");

    if (threadID == -1 && threadName != null) {
        // find thread by name
        long[] allIds = (long[]) mBeanServer.getAttribute(threadingOName, "AllThreadIds");
        for (int i = 0; i < allIds.length; i++) {
            CompositeData cd = (CompositeData) mBeanServer.invoke(threadingOName, "getThreadInfo",
                    new Object[] { new Long(allIds[i]) }, new String[] { "long" });
            String name = JmxTools.getStringAttr(cd, "threadName");
            if (threadName.equals(name)) {
                threadID = allIds[i];
                break;
            }
        }
    }

    if (mBeanServer.queryMBeans(threadingOName, null) != null && threadID != -1) {
        CompositeData cd = (CompositeData) mBeanServer.invoke(threadingOName, "getThreadInfo",
                new Object[] { new Long(threadID), new Integer(stackElementCount) },
                new String[] { "long", "int" });
        if (cd != null) {
            CompositeData[] elements = (CompositeData[]) cd.get("stackTrace");
            threadName = JmxTools.getStringAttr(cd, "threadName");

            stack = new ArrayList(elements.length);
            for (int i = 0; i < elements.length; i++) {
                CompositeData cd2 = elements[i];
                ThreadStackElement tse = new ThreadStackElement();
                tse.setClassName(JmxTools.getStringAttr(cd2, "className"));
                tse.setFileName(JmxTools.getStringAttr(cd2, "fileName"));
                tse.setMethodName(JmxTools.getStringAttr(cd2, "methodName"));
                tse.setLineNumber(JmxTools.getIntAttr(cd2, "lineNumber", -1));
                tse.setNativeMethod(JmxTools.getBooleanAttr(cd2, "nativeMethod"));
                stack.add(tse);
            }
        }
    }
    return new ModelAndView(getViewName(), "stack", stack).addObject("threadName", threadName);
}
From source file:org.apache.hadoop.hdfs.server.namenode.TestHostsFiles.java
@Test
public void testHostsExcludeInUI() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");

    // Configure an excludes file
    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
    Path excludeFile = new Path(dir, "exclude");
    Path includeFile = new Path(dir, "include");
    assertTrue(localFileSys.mkdirs(dir));
    DFSTestUtil.writeFile(localFileSys, excludeFile, "");
    DFSTestUtil.writeFile(localFileSys, includeFile, "");
    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

    // Two blocks and four racks
    String racks[] = { "/rack1", "/rack1", "/rack2", "/rack2" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
        // Create a file with one block
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

        // Decommission one of the hosts with the block, this should cause
        // the block to get replicated to another host on the same rack,
        // otherwise the rack policy is violated.
        BlockLocation locs[] = fs.getFileBlockLocations(fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
        String name = locs[0].getNames()[0];
        String names = name + "\n" + "localhost:42\n";
        LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
        DFSTestUtil.writeFile(localFileSys, excludeFile, name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);

        // Check the block still has sufficient # replicas across racks
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
        assertTrue("Live nodes should contain the decommissioned node", nodes.contains("Decommissioned"));
    } finally {
        cluster.shutdown();
    }
}
From source file:psiprobe.controllers.threads.ThreadStackController.java
@Override
protected ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response)
        throws Exception {
    long threadId = ServletRequestUtils.getLongParameter(request, "id", -1);
    String threadName = ServletRequestUtils.getStringParameter(request, "name", null);
    List<ThreadStackElement> stack = null;

    MBeanServer mbeanServer = new Registry().getMBeanServer();
    ObjectName threadingOName = new ObjectName("java.lang:type=Threading");

    if (threadId == -1 && threadName != null) {
        // find thread by name
        for (long id : (long[]) mbeanServer.getAttribute(threadingOName, "AllThreadIds")) {
            CompositeData cd = (CompositeData) mbeanServer.invoke(threadingOName, "getThreadInfo",
                    new Object[] { id }, new String[] { "long" });
            String name = JmxTools.getStringAttr(cd, "threadName");
            if (threadName.equals(name)) {
                threadId = id;
                break;
            }
        }
    }

    if (mbeanServer.queryMBeans(threadingOName, null) != null && threadId != -1) {
        CompositeData cd = (CompositeData) mbeanServer.invoke(threadingOName, "getThreadInfo",
                new Object[] { threadId, stackElementCount }, new String[] { "long", "int" });
        if (cd != null) {
            CompositeData[] elements = (CompositeData[]) cd.get("stackTrace");
            threadName = JmxTools.getStringAttr(cd, "threadName");

            stack = new ArrayList<>(elements.length);
            for (CompositeData cd2 : elements) {
                ThreadStackElement tse = new ThreadStackElement();
                tse.setClassName(JmxTools.getStringAttr(cd2, "className"));
                tse.setFileName(JmxTools.getStringAttr(cd2, "fileName"));
                tse.setMethodName(JmxTools.getStringAttr(cd2, "methodName"));
                tse.setLineNumber(JmxTools.getIntAttr(cd2, "lineNumber", -1));
                tse.setNativeMethod(JmxTools.getBooleanAttr(cd2, "nativeMethod"));
                stack.add(tse);
            }
        }
    }
    return new ModelAndView(getViewName(), "stack", stack).addObject("threadName", threadName);
}
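The three ThreadStackController variants above drive java.lang:type=Threading through raw getAttribute/invoke calls and unpack CompositeData by hand. When the target is a standard platform MXBean, a typed proxy avoids the string attribute names entirely. A minimal sketch using only standard JDK APIs (the class and method names here are illustrative, not Psi Probe code):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import javax.management.MBeanServerConnection;

public class ThreadStackByName {
    // Look up a thread's stack by name via a typed ThreadMXBean proxy
    // instead of getAttribute("AllThreadIds") and invoke("getThreadInfo", ...).
    static ThreadInfo findByName(MBeanServerConnection mbsc, String threadName, int maxDepth)
            throws Exception {
        ThreadMXBean threads = ManagementFactory.newPlatformMXBeanProxy(
                mbsc, ManagementFactory.THREAD_MXBEAN_NAME, ThreadMXBean.class);
        for (long id : threads.getAllThreadIds()) {
            ThreadInfo info = threads.getThreadInfo(id, maxDepth);
            if (info != null && threadName.equals(info.getThreadName())) {
                return info; // getStackTrace() yields StackTraceElement[]
            }
        }
        return null;
    }
}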
From source file:com.googlecode.psiprobe.beans.ResourceResolverBean.java
private String getStringAttribute(MBeanServer server, ObjectName objectName, String attributeName) {
    try {
        return (String) server.getAttribute(objectName, attributeName);
    } catch (Exception e) {
        logger.error("Error getting attribute '" + attributeName + "' from '" + objectName + "'", e);
        return null;
    }
}
From source file:org.apache.hadoop.hdfs.server.datanode.TestDataNodeMXBean.java
@Test
public void testDataNodeMXBeanBlockSize() throws Exception {
    Configuration conf = new Configuration();

    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
        DataNode dn = cluster.getDataNodes().get(0);
        for (int i = 0; i < 100; i++) {
            DFSTestUtil.writeFile(cluster.getFileSystem(),
                    new Path("/foo" + String.valueOf(i) + ".txt"), "test content");
        }
        DataNodeTestUtils.triggerBlockReport(dn);

        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
        String bpActorInfo = (String) mbs.getAttribute(mxbeanName, "BPServiceActorInfo");
        Assert.assertEquals(dn.getBPServiceActorInfo(), bpActorInfo);
        LOG.info("bpActorInfo is " + bpActorInfo);

        TypeReference<ArrayList<Map<String, String>>> typeRef =
                new TypeReference<ArrayList<Map<String, String>>>() {};
        ArrayList<Map<String, String>> bpActorInfoList = new ObjectMapper().readValue(bpActorInfo, typeRef);
        int maxDataLength = Integer.valueOf(bpActorInfoList.get(0).get("maxDataLength"));
        int confMaxDataLength = dn.getConf().getInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
                CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
        int maxBlockReportSize = Integer.valueOf(bpActorInfoList.get(0).get("maxBlockReportSize"));
        LOG.info("maxDataLength is " + maxDataLength);
        LOG.info("maxBlockReportSize is " + maxBlockReportSize);
        assertTrue("maxBlockReportSize should be greater than zero", maxBlockReportSize > 0);
        assertEquals("maxDataLength should be exactly the same value of ipc.maximum.data.length",
                confMaxDataLength, maxDataLength);
    }
}
From source file:psiprobe.beans.ResourceResolverBean.java
/**
 * Gets the string attribute.
 *
 * @param server the server
 * @param objectName the object name
 * @param attributeName the attribute name
 * @return the string attribute
 */
private String getStringAttribute(MBeanServer server, ObjectName objectName, String attributeName) {
    try {
        return (String) server.getAttribute(objectName, attributeName);
    } catch (Exception e) {
        logger.error("Error getting attribute '{}' from '{}'", attributeName, objectName, e);
        return null;
    }
}
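When several attributes are needed from the same MBean, as in the ContentStore example above, MBeanServer also offers getAttributes (plural), which fetches them in one call and omits attributes it cannot read instead of throwing. A short sketch; the class and method names are illustrative, and the attribute names are borrowed from the ContentStore example:

import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class MultiAttributeDemo {
    // Fetch several attributes in one round trip. Unlike getAttribute,
    // getAttributes silently skips unreadable attributes rather than throwing.
    static void printStoreInfo(MBeanServer mbs, ObjectName storeName) throws Exception {
        AttributeList attrs = mbs.getAttributes(storeName,
                new String[] { "WriteSupported", "SpaceUsed" });
        for (Attribute attr : attrs.asList()) {
            System.out.println(attr.getName() + " = " + attr.getValue());
        }
    }
}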