Example usage for java.lang.management ManagementFactory getPlatformMBeanServer

List of usage examples for java.lang.management ManagementFactory getPlatformMBeanServer

Introduction

This page collects example usages of java.lang.management.ManagementFactory.getPlatformMBeanServer, drawn from several open-source projects.

Prototype

public static synchronized MBeanServer getPlatformMBeanServer() 

Document

Returns the platform MBeanServer (javax.management.MBeanServer).
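
Before the project-specific examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed) that registers a standard MBean with the platform MBeanServer, reads an attribute back, and unregisters it. The CounterMBean interface and the com.example ObjectName are illustrative names only.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class PlatformMBeanServerExample {

    // A minimal standard MBean: the interface name must be the
    // implementation class name plus the "MBean" suffix.
    public interface CounterMBean {
        int getCount();
    }

    public static class Counter implements CounterMBean {
        @Override
        public int getCount() {
            return 42;
        }
    }

    public static void main(String[] args) throws Exception {
        // Obtain the single platform MBeanServer shared by the whole JVM.
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();

        // Register the MBean under a caller-chosen ObjectName.
        ObjectName name = new ObjectName("com.example:type=Counter");
        server.registerMBean(new Counter(), name);

        // Read the attribute back through the server, as a JMX client would.
        Object count = server.getAttribute(name, "Count");
        System.out.println("Count = " + count);

        // Clean up so a repeated registration in the same JVM does not
        // fail with InstanceAlreadyExistsException.
        server.unregisterMBean(name);
    }
}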

Usage

From source file:lineage2.gameserver.network.telnet.commands.TelnetPerfomance.java

/**
 * Constructor for TelnetPerfomance.
 */
public TelnetPerfomance() {
    _commands.add(new TelnetCommand("pool", "p") {
        @Override
        public String getUsage() {
            return "pool [dump]";
        }

        @Override
        public String handle(String[] args) {
            StringBuilder sb = new StringBuilder();
            if ((args.length == 0) || args[0].isEmpty()) {
                sb.append(ThreadPoolManager.getInstance().getStats());
            } else if (args[0].equals("dump") || args[0].equals("d")) {
                try {
                    new File("stats").mkdir();
                    FileUtils
                            .writeStringToFile(
                                    new File("stats/RunnableStats-" + new SimpleDateFormat("MMddHHmmss")
                                            .format(System.currentTimeMillis()) + ".txt"),
                                    RunnableStatsManager.getInstance().getStats().toString());
                    sb.append("Runnable stats saved.\n");
                } catch (IOException e) {
                    sb.append("Exception: " + e.getMessage() + "!\n");
                }
            } else {
                return null;
            }
            return sb.toString();
        }
    });
    _commands.add(new TelnetCommand("mem", "m") {
        @Override
        public String getUsage() {
            return "mem";
        }

        @Override
        public String handle(String[] args) {
            StringBuilder sb = new StringBuilder();
            sb.append(StatsUtils.getMemUsage());
            return sb.toString();
        }
    });
    _commands.add(new TelnetCommand("heap") {
        @Override
        public String getUsage() {
            return "heap [dump] <live>";
        }

        @Override
        public String handle(String[] args) {
            StringBuilder sb = new StringBuilder();
            if ((args.length == 0) || args[0].isEmpty()) {
                return null;
            } else if (args[0].equals("dump") || args[0].equals("d")) {
                try {
                    boolean live = (args.length == 2) && !args[1].isEmpty()
                            && (args[1].equals("live") || args[1].equals("l"));
                    new File("dumps").mkdir();
                    String filename = "dumps/HeapDump" + (live ? "Live" : "") + "-"
                            + new SimpleDateFormat("MMddHHmmss").format(System.currentTimeMillis()) + ".hprof";
                    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
                    HotSpotDiagnosticMXBean bean = ManagementFactory.newPlatformMXBeanProxy(server,
                            "com.sun.management:type=HotSpotDiagnostic", HotSpotDiagnosticMXBean.class);
                    bean.dumpHeap(filename, live);
                    sb.append("Heap dumped.\n");
                } catch (IOException e) {
                    sb.append("Exception: " + e.getMessage() + "!\n");
                }
            } else {
                return null;
            }
            return sb.toString();
        }
    });
    _commands.add(new TelnetCommand("threads", "t") {
        @Override
        public String getUsage() {
            return "threads [dump]";
        }

        @Override
        public String handle(String[] args) {
            StringBuilder sb = new StringBuilder();
            if ((args.length == 0) || args[0].isEmpty()) {
                sb.append(StatsUtils.getThreadStats());
            } else if (args[0].equals("dump") || args[0].equals("d")) {
                try {
                    new File("stats").mkdir();
                    FileUtils
                            .writeStringToFile(
                                    new File("stats/ThreadsDump-" + new SimpleDateFormat("MMddHHmmss")
                                            .format(System.currentTimeMillis()) + ".txt"),
                                    StatsUtils.getThreadStats(true, true, true).toString());
                    sb.append("Threads stats saved.\n");
                } catch (IOException e) {
                    sb.append("Exception: " + e.getMessage() + "!\n");
                }
            } else {
                return null;
            }
            return sb.toString();
        }
    });
    _commands.add(new TelnetCommand("gc") {
        @Override
        public String getUsage() {
            return "gc";
        }

        @Override
        public String handle(String[] args) {
            StringBuilder sb = new StringBuilder();
            sb.append(StatsUtils.getGCStats());
            return sb.toString();
        }
    });
    _commands.add(new TelnetCommand("net", "ns") {
        @Override
        public String getUsage() {
            return "net";
        }

        @Override
        public String handle(String[] args) {
            StringBuilder sb = new StringBuilder();
            sb.append(SelectorThread.getStats());
            return sb.toString();
        }
    });
    _commands.add(new TelnetCommand("pathfind", "pfs") {
        @Override
        public String getUsage() {
            return "pathfind";
        }

        @Override
        public String handle(String[] args) {
            StringBuilder sb = new StringBuilder();
            sb.append(PathFindBuffers.getStats());
            return sb.toString();
        }
    });
    _commands.add(new TelnetCommand("dbstats", "ds") {
        @Override
        public String getUsage() {
            return "dbstats";
        }

        @Override
        public String handle(String[] args) {
            StringBuilder sb = new StringBuilder();
            sb.append("Basic database usage\n");
            sb.append("=================================================\n");
            sb.append("Connections").append('\n');
            sb.append("     Busy: ........................ ")
                    .append(DatabaseFactory.getInstance().getBusyConnectionCount()).append('\n');
            sb.append("     Idle: ........................ ")
                    .append(DatabaseFactory.getInstance().getIdleConnectionCount()).append('\n');
            sb.append("Players").append('\n');
            sb.append("     Update: ...................... ").append(GameStats.getUpdatePlayerBase())
                    .append('\n');
            double cacheHitCount, cacheMissCount, cacheHitRatio;
            Cache cache;
            LiveCacheStatistics cacheStats;
            JdbcEntityStats entityStats;
            cache = ItemsDAO.getInstance().getCache();
            cacheStats = cache.getLiveCacheStatistics();
            entityStats = ItemsDAO.getInstance().getStats();
            cacheHitCount = cacheStats.getCacheHitCount();
            cacheMissCount = cacheStats.getCacheMissCount();
            cacheHitRatio = cacheHitCount / (cacheHitCount + cacheMissCount);
            sb.append("Items").append('\n');
            sb.append("     getLoadCount: ................ ").append(entityStats.getLoadCount()).append('\n');
            sb.append("     getInsertCount: .............. ").append(entityStats.getInsertCount()).append('\n');
            sb.append("     getUpdateCount: .............. ").append(entityStats.getUpdateCount()).append('\n');
            sb.append("     getDeleteCount: .............. ").append(entityStats.getDeleteCount()).append('\n');
            sb.append("Cache").append('\n');
            sb.append("     getPutCount: ................. ").append(cacheStats.getPutCount()).append('\n');
            sb.append("     getUpdateCount: .............. ").append(cacheStats.getUpdateCount()).append('\n');
            sb.append("     getRemovedCount: ............. ").append(cacheStats.getRemovedCount()).append('\n');
            sb.append("     getEvictedCount: ............. ").append(cacheStats.getEvictedCount()).append('\n');
            sb.append("     getExpiredCount: ............. ").append(cacheStats.getExpiredCount()).append('\n');
            sb.append("     getSize: ..................... ").append(cacheStats.getSize()).append('\n');
            sb.append("     getLocalHeapSize: ............. ").append(cacheStats.getLocalHeapSize())
                    .append('\n');
            sb.append("     getLocalDiskSize: ............... ").append(cacheStats.getLocalDiskSize())
                    .append('\n');
            sb.append("     cacheHitRatio: ............... ").append(String.format("%2.2f", cacheHitRatio))
                    .append('\n');
            sb.append("=================================================\n");
            cache = MailDAO.getInstance().getCache();
            cacheStats = cache.getLiveCacheStatistics();
            entityStats = MailDAO.getInstance().getStats();
            cacheHitCount = cacheStats.getCacheHitCount();
            cacheMissCount = cacheStats.getCacheMissCount();
            cacheHitRatio = cacheHitCount / (cacheHitCount + cacheMissCount);
            sb.append("Mail").append('\n');
            sb.append("     getLoadCount: ................ ").append(entityStats.getLoadCount()).append('\n');
            sb.append("     getInsertCount: .............. ").append(entityStats.getInsertCount()).append('\n');
            sb.append("     getUpdateCount: .............. ").append(entityStats.getUpdateCount()).append('\n');
            sb.append("     getDeleteCount: .............. ").append(entityStats.getDeleteCount()).append('\n');
            sb.append("Cache").append('\n');
            sb.append("     getPutCount: ................. ").append(cacheStats.getPutCount()).append('\n');
            sb.append("     getUpdateCount: .............. ").append(cacheStats.getUpdateCount()).append('\n');
            sb.append("     getRemovedCount: ............. ").append(cacheStats.getRemovedCount()).append('\n');
            sb.append("     getEvictedCount: ............. ").append(cacheStats.getEvictedCount()).append('\n');
            sb.append("     getExpiredCount: ............. ").append(cacheStats.getExpiredCount()).append('\n');
            sb.append("     getSize: ..................... ").append(cacheStats.getSize()).append('\n');
            sb.append("     getLocalHeapSize: ............. ").append(cacheStats.getLocalHeapSize())
                    .append('\n');
            sb.append("     getLocalDiskSize: ............... ").append(cacheStats.getLocalDiskSize())
                    .append('\n');
            sb.append("     cacheHitRatio: ............... ").append(String.format("%2.2f", cacheHitRatio))
                    .append('\n');
            sb.append("=================================================\n");
            return sb.toString();
        }
    });
    _commands.add(new TelnetCommand("aistats", "as") {
        @Override
        public String getUsage() {
            return "aistats";
        }

        @Override
        public String handle(String[] args) {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < Config.AI_TASK_MANAGER_COUNT; i++) {
                sb.append("AiTaskManager #").append(i + 1).append('\n');
                sb.append("=================================================\n");
                sb.append(AiTaskManager.getInstance().getStats(i));
                sb.append("=================================================\n");
            }
            return sb.toString();
        }
    });
    _commands.add(new TelnetCommand("effectstats", "es") {
        @Override
        public String getUsage() {
            return "effectstats";
        }

        @Override
        public String handle(String[] args) {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < Config.EFFECT_TASK_MANAGER_COUNT; i++) {
                sb.append("EffectTaskManager #").append(i + 1).append('\n');
                sb.append("=================================================\n");
                sb.append(EffectTaskManager.getInstance().getStats(i));
                sb.append("=================================================\n");
            }
            return sb.toString();
        }
    });
}
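
The relevant call in the example above is inside the heap command: it obtains the platform MBeanServer and proxies the HotSpot-specific HotSpotDiagnosticMXBean to write an .hprof file. Distilled into a standalone sketch (HotSpot/OpenJDK only; the HeapDumper class name and output path are illustrative):

import java.io.IOException;
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import com.sun.management.HotSpotDiagnosticMXBean;

public class HeapDumper {

    /**
     * Writes an .hprof heap dump to the given path. The file must not
     * already exist; when live is true only reachable objects are dumped.
     */
    public static void dumpHeap(String fileName, boolean live) throws IOException {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        HotSpotDiagnosticMXBean bean = ManagementFactory.newPlatformMXBeanProxy(
                server, "com.sun.management:type=HotSpotDiagnostic", HotSpotDiagnosticMXBean.class);
        bean.dumpHeap(fileName, live);
    }

    public static void main(String[] args) throws IOException {
        dumpHeap("heap-" + System.currentTimeMillis() + ".hprof", true);
    }
}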

From source file:org.opendaylight.controller.netconf.it.NetconfITTest.java

@Before
public void setUp() throws Exception {
    super.initConfigTransactionManagerImpl(
            new HardcodedModuleFactoriesResolver(getModuleFactories().toArray(new ModuleFactory[0])));

    loadMessages();

    NetconfOperationServiceFactoryListenerImpl factoriesListener = new NetconfOperationServiceFactoryListenerImpl();
    factoriesListener
            .onAddNetconfOperationServiceFactory(new NetconfOperationServiceFactoryImpl(getYangStore()));

    commitNot = new DefaultCommitNotificationProducer(ManagementFactory.getPlatformMBeanServer());

    dispatch = createDispatcher(factoriesListener);
    ChannelFuture s = dispatch.createServer(tcpAddress);
    s.await();

    clientDispatcher = new NetconfClientDispatcher(nettyThreadgroup, nettyThreadgroup, 5000);
}

From source file:uk.co.tfd.sm.memory.ehcache.CacheManagerServiceImpl.java

@Activate
public void activate(Map<String, Object> properties) throws FileNotFoundException, IOException {
    String config = toString(properties.get(CACHE_CONFIG), DEFAULT_CACHE_CONFIG);
    File configFile = new File(config);
    if (configFile.exists()) {
        LOGGER.info("Configuring Cache from {} ", configFile.getAbsolutePath());
        InputStream in = null;
        try {
            in = processConfig(new FileInputStream(configFile), properties);
            cacheManager = new CacheManager(in);
        } finally {
            if (in != null) {
                in.close();
            }
        }
    } else {
        LOGGER.info("Configuring Cache from Classpath Default {} ", CONFIG_PATH);
        InputStream in = processConfig(this.getClass().getClassLoader().getResourceAsStream(CONFIG_PATH),
                properties);
        if (in == null) {
            throw new IOException("Unable to open config at classpath location " + CONFIG_PATH);
        }
        cacheManager = new CacheManager(in);
        in.close();
    }

    final WeakReference<CacheManagerServiceImpl> ref = new WeakReference<CacheManagerServiceImpl>(this);
    /*
     * Add in a shutdown hook, for safety
     */
    Runtime.getRuntime().addShutdownHook(new Thread() {
        /*
         * (non-Javadoc)
         *
         * @see java.lang.Thread#run()
         */
        @Override
        public void run() {
            try {
                CacheManagerServiceImpl cm = ref.get();
                if (cm != null) {
                    cm.deactivate();
                }
            } catch (Throwable t) {
                LOGGER.debug(t.getMessage(), t);
            }
        }
    });

    // register the cache manager with JMX
    MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
    ManagementService.registerMBeans(cacheManager, mBeanServer, true, true, true, true);

}
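
Once ManagementService.registerMBeans has run, the cache MBeans can be inspected through the same platform MBeanServer. A small sketch, assuming Ehcache 2.x registers its MBeans under the net.sf.ehcache domain (check your Ehcache version if the query returns nothing):

import java.lang.management.ManagementFactory;
import java.util.Set;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class ListCacheMBeans {
    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();

        // Query every MBean in the (assumed) Ehcache domain; the pattern
        // "net.sf.ehcache:*" matches any type/name under that domain.
        Set<ObjectName> names = server.queryNames(new ObjectName("net.sf.ehcache:*"), null);
        for (ObjectName name : names) {
            System.out.println(name);
        }
    }
}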

From source file:org.apache.hadoop.hbase.JMXListener.java

public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) throws IOException {
    boolean rmiSSL = false;
    boolean authenticate = true;
    String passwordFile = null;
    String accessFile = null;

    System.setProperty("java.rmi.server.randomIDs", "true");

    String rmiSSLValue = System.getProperty("com.sun.management.jmxremote.ssl", "false");
    rmiSSL = Boolean.parseBoolean(rmiSSLValue);

    String authenticateValue = System.getProperty("com.sun.management.jmxremote.authenticate", "false");
    authenticate = Boolean.parseBoolean(authenticateValue);

    passwordFile = System.getProperty("com.sun.management.jmxremote.password.file");
    accessFile = System.getProperty("com.sun.management.jmxremote.access.file");

    LOG.info("rmiSSL:" + rmiSSLValue + ",authenticate:" + authenticateValue + ",passwordFile:" + passwordFile
            + ",accessFile:" + accessFile);

    // Environment map
    HashMap<String, Object> jmxEnv = new HashMap<String, Object>();

    RMIClientSocketFactory csf = null;
    RMIServerSocketFactory ssf = null;

    if (rmiSSL) {
        if (rmiRegistryPort == rmiConnectorPort) {
            throw new IOException(
                    "SSL is enabled. " + "rmiConnectorPort cannot share with the rmiRegistryPort!");
        }
        csf = new SslRMIClientSocketFactory();
        ssf = new SslRMIServerSocketFactory();
    }

    if (csf != null) {
        jmxEnv.put(RMIConnectorServer.RMI_CLIENT_SOCKET_FACTORY_ATTRIBUTE, csf);
    }
    if (ssf != null) {
        jmxEnv.put(RMIConnectorServer.RMI_SERVER_SOCKET_FACTORY_ATTRIBUTE, ssf);
    }

    // Configure authentication
    if (authenticate) {
        jmxEnv.put("jmx.remote.x.password.file", passwordFile);
        jmxEnv.put("jmx.remote.x.access.file", accessFile);
    }

    // Create the RMI registry
    LocateRegistry.createRegistry(rmiRegistryPort);
    // Retrieve the PlatformMBeanServer.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();

    // Build jmxURL
    JMXServiceURL serviceUrl = buildJMXServiceURL(rmiRegistryPort, rmiConnectorPort);

    try {
        // Start the JMXListener with the connection string
        jmxCS = JMXConnectorServerFactory.newJMXConnectorServer(serviceUrl, jmxEnv, mbs);
        jmxCS.start();
        LOG.info("ConnectorServer started!");
    } catch (IOException e) {
        LOG.error("fail to start connector server!", e);
    }

}
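
A remote client can reach the connector server started above through JMXConnectorFactory. A minimal sketch, assuming the RMI registry listens on port 10101 and the connector on port 10102 of localhost (substitute the ports actually passed to startConnectorServer):

import javax.management.MBeanServerConnection;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class JmxClient {
    public static void main(String[] args) throws Exception {
        // Hypothetical endpoints; the URL format matches the one built by
        // buildJMXServiceURL above: connector port first, registry port second.
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi://localhost:10102/jndi/rmi://localhost:10101/jmxrmi");

        try (JMXConnector connector = JMXConnectorFactory.connect(url, null)) {
            MBeanServerConnection connection = connector.getMBeanServerConnection();
            System.out.println("MBean count: " + connection.getMBeanCount());
        }
    }
}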

From source file:org.apache.hadoop.hdfs.server.datanode.TestDataNodeMXBean.java

@Test
public void testDataNodeMXBeanBlockSize() throws Exception {
    Configuration conf = new Configuration();

    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
        DataNode dn = cluster.getDataNodes().get(0);
        for (int i = 0; i < 100; i++) {
            DFSTestUtil.writeFile(cluster.getFileSystem(), new Path("/foo" + String.valueOf(i) + ".txt"),
                    "test content");
        }
        DataNodeTestUtils.triggerBlockReport(dn);
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
        String bpActorInfo = (String) mbs.getAttribute(mxbeanName, "BPServiceActorInfo");
        Assert.assertEquals(dn.getBPServiceActorInfo(), bpActorInfo);
        LOG.info("bpActorInfo is " + bpActorInfo);
        TypeReference<ArrayList<Map<String, String>>> typeRef = new TypeReference<ArrayList<Map<String, String>>>() {
        };
        ArrayList<Map<String, String>> bpActorInfoList = new ObjectMapper().readValue(bpActorInfo, typeRef);
        int maxDataLength = Integer.valueOf(bpActorInfoList.get(0).get("maxDataLength"));
        int confMaxDataLength = dn.getConf().getInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
                CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
        int maxBlockReportSize = Integer.valueOf(bpActorInfoList.get(0).get("maxBlockReportSize"));
        LOG.info("maxDataLength is " + maxDataLength);
        LOG.info("maxBlockReportSize is " + maxBlockReportSize);
        assertTrue("maxBlockReportSize should be greater than zero", maxBlockReportSize > 0);
        assertEquals("maxDataLength should be exactly " + "the same value of ipc.maximum.data.length",
                confMaxDataLength, maxDataLength);
    }
}

From source file:org.codice.ddf.configuration.migration.ConfigurationMigrationManagerTest.java

@After
public void tearDown() throws Exception {
    System.getProperties().remove(RESTART_JVM);
    System.getProperties().remove(WRAPPER_KEY);
    ManagementFactory.getPlatformMBeanServer()
            .unregisterMBean(new ObjectName("org.tanukisoftware.wrapper:type=WrapperManager"));
}
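
unregisterMBean throws InstanceNotFoundException when the name is not registered, which in a tearDown method can mask the real test failure. A small defensive sketch (the helper name is illustrative):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class SafeUnregister {
    public static void unregisterQuietly(String name) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        ObjectName objectName = new ObjectName(name);

        // Only unregister if the MBean is actually present, so cleanup
        // cannot fail with InstanceNotFoundException.
        if (server.isRegistered(objectName)) {
            server.unregisterMBean(objectName);
        }
    }
}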

From source file:org.codice.ddf.catalog.download.action.ResourceDownloadActionProvider.java

ResourceCacheServiceMBean createResourceCacheMBeanProxy() {
    try {
        return JMX.newMBeanProxy(ManagementFactory.getPlatformMBeanServer(),
                new ObjectName(ResourceCacheServiceMBean.OBJECT_NAME), ResourceCacheServiceMBean.MBEAN_CLASS);
    } catch (MalformedObjectNameException e) {
        String message = String.format("Unable to create MBean proxy for [%s].",
                ResourceCacheServiceMBean.class.getName());
        LOGGER.debug(message, e);
        throw new ResourceDownloadActionException(message, e);
    }
}
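
JMX.newMBeanProxy works the same way for application-defined MBeans. A sketch with a hypothetical DownloadStatusMBean interface and com.example ObjectName, purely for illustration:

import java.lang.management.ManagementFactory;
import javax.management.JMX;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class MBeanProxyExample {

    // Hypothetical management interface, not part of any project above.
    public interface DownloadStatusMBean {
        int getActiveDownloads();
    }

    public static DownloadStatusMBean proxy() throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName("com.example:type=DownloadStatus");

        // The proxy forwards each getter/operation to the named MBean;
        // calls fail at invocation time if the MBean is not registered.
        return JMX.newMBeanProxy(server, name, DownloadStatusMBean.class);
    }
}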

From source file:org.apache.hadoop.hdfs.server.namenode.TestHostsFiles.java

@Test
public void testHostsExcludeInUI() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");

    // Configure an excludes file
    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
    Path excludeFile = new Path(dir, "exclude");
    Path includeFile = new Path(dir, "include");
    assertTrue(localFileSys.mkdirs(dir));
    DFSTestUtil.writeFile(localFileSys, excludeFile, "");
    DFSTestUtil.writeFile(localFileSys, includeFile, "");
    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

    // Two blocks and four racks
    String racks[] = { "/rack1", "/rack1", "/rack2", "/rack2" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
        // Create a file with one block
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

        // Decommission one of the hosts with the block, this should cause 
        // the block to get replicated to another host on the same rack,
        // otherwise the rack policy is violated.
        BlockLocation locs[] = fs.getFileBlockLocations(fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
        String name = locs[0].getNames()[0];
        String names = name + "\n" + "localhost:42\n";
        LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
        DFSTestUtil.writeFile(localFileSys, excludeFile, name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);

        // Check the block still has sufficient # replicas across racks
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
        assertTrue("Live nodes should contain the decommissioned node", nodes.contains("Decommissioned"));
    } finally {
        cluster.shutdown();
    }
}

From source file:com.parallels.desktopcloud.ParallelsDesktopConnectorSlaveComputer.java

private static VMResources getHostResources(Channel ch) throws Exception {
    return ch.call(new MasterToSlaveCallable<VMResources, Exception>() {
        private long getHostPhysicalMemory() {
            try {
                MBeanServer server = ManagementFactory.getPlatformMBeanServer();
                Object attribute = server.getAttribute(new ObjectName("java.lang", "type", "OperatingSystem"),
                        "TotalPhysicalMemorySize");
                return (Long) attribute;
            } catch (JMException e) {
                LOGGER.log(Level.SEVERE, "Failed to get host RAM size: %s", e);
                return Long.MAX_VALUE;
            }
        }

        @Override
        public VMResources call() throws Exception {
            int cpus = Runtime.getRuntime().availableProcessors();
            long ram = getHostPhysicalMemory();
            return new VMResources(cpus, ram);
        }
    });
}
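
Reading TotalPhysicalMemorySize as an untyped attribute, as above, avoids a compile-time dependency on com.sun.management. When a HotSpot/OpenJDK runtime can be assumed, the typed MXBean is an alternative; a sketch (JDK-specific, and getTotalPhysicalMemorySize is deprecated in newer JDKs in favor of getTotalMemorySize):

import java.lang.management.ManagementFactory;

public class HostMemory {
    public static void main(String[] args) {
        // On HotSpot/OpenJDK the platform OperatingSystemMXBean can be cast
        // to the com.sun.management subtype, which exposes physical memory
        // directly; this cast is a JDK-specific assumption, not portable API.
        com.sun.management.OperatingSystemMXBean os =
                (com.sun.management.OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean();
        System.out.println("Total physical memory: " + os.getTotalPhysicalMemorySize() + " bytes");
    }
}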

From source file:org.nuxeo.runtime.management.ServerLocatorService.java

protected void doUnregisterLocator(ServerLocatorDescriptor descriptor) {
    servers.remove(descriptor.domainName);
    if (descriptor.isDefault) {
        defaultServer = ManagementFactory.getPlatformMBeanServer();
    }
}