Example usage for java.lang.management ManagementFactory getMemoryMXBean

List of usage examples for java.lang.management ManagementFactory getMemoryMXBean

Introduction

This page collects example usages of java.lang.management.ManagementFactory.getMemoryMXBean.

Prototype

public static MemoryMXBean getMemoryMXBean() 

Document

Returns the managed bean for the memory system of the Java virtual machine.
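
A minimal, self-contained sketch of how the returned bean is typically queried (the class name MemoryMXBeanDemo is chosen here purely for illustration):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;

public class MemoryMXBeanDemo {
    public static void main(String[] args) {
        // Obtain the platform MemoryMXBean and query heap and non-heap usage.
        MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
        MemoryUsage heap = memoryMXBean.getHeapMemoryUsage();
        MemoryUsage nonHeap = memoryMXBean.getNonHeapMemoryUsage();
        System.out.println("Heap used/max: " + heap.getUsed() + "/" + heap.getMax());
        System.out.println("Non-heap used: " + nonHeap.getUsed());
    }
}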

Usage

From source file:org.jahia.modules.serversettings.memoryThread.MemoryThreadInformationManagement.java

public MemoryThreadInformationManagement refresh() {
    MemoryUsage usage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    committedMemory = FileUtils.humanReadableByteCount(usage.getCommitted(), true);
    maxMemory = FileUtils.humanReadableByteCount(usage.getMax(), true);
    usedMemory = FileUtils.humanReadableByteCount(usage.getUsed(), true);
    memoryUsage = Math.round((double) usage.getUsed() / (double) usage.getMax() * 100d);
    return this;
}
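
Note that MemoryUsage.getMax() may return -1 when the maximum is undefined, which would make the percentage above meaningless. A defensive variant (a sketch, not part of the original source) might guard for that case:

    MemoryUsage usage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long max = usage.getMax();
    // Only compute a percentage when a maximum is actually defined.
    long heapUsagePercent = max > 0
            ? Math.round((double) usage.getUsed() / (double) max * 100d)
            : -1;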

From source file:com.opengamma.web.WebAbout.java

/**
 * Gets the memory JMX.
 * @return the memory JMX
 */
public MemoryMXBean getMemoryJmx() {
    return ManagementFactory.getMemoryMXBean();
}

From source file:com.janrain.backplane.server.metrics.MetricsAccumulator.java

private Map<String, Object> outputJVMUsage() {

    long mb = 1048576;

    Map<String, Object> out = new LinkedHashMap<String, Object>();

    long startTime = ManagementFactory.getRuntimeMXBean().getStartTime();
    int totalLiveThreads = ManagementFactory.getThreadMXBean().getThreadCount();
    double loadAverage = ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage();
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();

    String startTimeString = BackplaneConfig.ISO8601.format(new Date(startTime));

    out.put("type", "jvm");
    out.put("unit", "mb");
    out.put("heap_used", mu.getUsed() / mb);
    out.put("heap_free", (mu.getMax() - mu.getUsed()) / mb);
    out.put("heap_max", mu.getMax() / mb);
    out.put("jvm_start_time", startTimeString);
    out.put("total_live_threads", totalLiveThreads);
    out.put("load_average_minute", String.format("%2.2f", loadAverage));

    return out;

}

From source file:org.tomitribe.tribestream.registryng.bootstrap.Provisioning.java

private void seedFile(final File swaggerFile) {
    LOGGER.info("Seeding " + swaggerFile.getName());

    try {
        final Swagger swagger = mapper.readValue(swaggerFile, Swagger.class);

        if (repository.findApplicationByNameAndVersion(swagger.getInfo().getTitle(),
                swagger.getInfo().getVersion()) == null) {
            OpenApiDocument openApiDocument = repository.insert(swagger);
            LOGGER.log(Level.INFO, "Persisted application {0}-{1}",
                    new Object[] { openApiDocument.getName(), openApiDocument.getVersion() });

        } else {
            LOGGER.log(Level.INFO, "Application {0}-{1} already available in DB ",
                    new Object[] { swagger.getInfo().getTitle(), swagger.getInfo().getVersion() });
        }

    } catch (final Exception e) {
        LOGGER.log(Level.WARNING, e, () -> String.format("Seeding %s failed!", swaggerFile.getName()));
    }
    LOGGER.info("Memory = " + ManagementFactory.getMemoryMXBean().getHeapMemoryUsage());
}

From source file:com.datatorrent.contrib.hdht.HadoopFilePerformanceTest.java

@AfterClass
public static void summary() throws Exception {
    long heapMax = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
    long nonHeapMax = ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage().getMax();
    logger.info("==============================================================================");
    logger.info("Test Size: " + String.format("%,d", testSize) + " pairs (" + String.format("%,d", keySizeBytes)
            + " key bytes /" + String.format("%,d", valueSizeBytes) + " value bytes)");
    logger.info("Memory: " + String.format("%,d", heapMax) + " Heap MAX +  " + String.format("%,d", nonHeapMax)
            + " Non-Heap Max =  " + String.format("%,d", (heapMax + nonHeapMax)) + " Total MAX");
    logger.info("==============================================================================");
    logger.info("KV PAIRS (" + keySizeBytes + "/" + valueSizeBytes + "), "
            + "TEST ID, ELAPSED TIME (s/microseconds), FILE SIZE (bytes)");
    Iterator<?> it = testSummary.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<?, ?> kv = (Map.Entry<?, ?>) it.next();
        logger.info(kv.getKey() + "," + kv.getValue());
    }
}

From source file:com.ebay.erl.mobius.core.collection.BigTupleList.java

/**
 * Creates an instance of {@link BigTupleList} with the
 * specified <code>comparator</code>.
 * <p>
 * {@link Tuple}s stored in this list are sorted by
 * the <code>comparator</code>.
 */
public BigTupleList(Comparator<Tuple> comparator, Reporter reporter) {
    if (comparator != null)
        this.comparator = comparator;

    // Register itself with the {@link MemoryMXBean} to receive a
    // notification when the used memory exceeds the threshold
    // (about to GC); the notification is handled in
    // {@link #handleNotification(Notification, Object)}.
    MemoryMXBean memBean = ManagementFactory.getMemoryMXBean();
    NotificationEmitter ne = (NotificationEmitter) memBean;
    //ne.addNotificationListener(this, null, null);

    // Set up a local folder to store temporary files containing
    // tuples that cannot fit in memory.

    this.workOutput = new File("tmp");
    if (!_LOG_OUTPUT) {
        LOGGER.info("working output is located in:" + this.workOutput.getAbsolutePath().toString());
        _LOG_OUTPUT = true;
    }
    JVMShutdownNotifier.getInstance().addObserver(this);

    synchronized (KEY) {
        _ID = _GLOBE_ID;
        _GLOBE_ID++;
    }

    this.reporter = reporter;
}
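
The constructor above leaves the actual listener registration commented out. For reference, a hedged sketch of how a NotificationListener is typically attached to the MemoryMXBean (assuming imports from java.lang.management and javax.management; the listener body is illustrative only):

    MemoryMXBean memBean = ManagementFactory.getMemoryMXBean();
    NotificationEmitter emitter = (NotificationEmitter) memBean;
    emitter.addNotificationListener(new NotificationListener() {
        @Override
        public void handleNotification(Notification notification, Object handback) {
            // This fires only after a usage threshold has been set on a
            // supporting memory pool via MemoryPoolMXBean.setUsageThreshold(...).
            if (MemoryNotificationInfo.MEMORY_THRESHOLD_EXCEEDED.equals(notification.getType())) {
                // React to heap usage crossing the configured threshold.
            }
        }
    }, null, null);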

From source file:me.doshou.admin.monitor.web.controller.HibernateCacheMonitorController.java

private void setMemoryInfo(Model model) {
    MemoryUsage heapMemoryUsage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long usedSystemMemory = heapMemoryUsage.getUsed();
    long maxSystemMemory = heapMemoryUsage.getMax();
    model.addAttribute("usedSystemMemory", usedSystemMemory);
    model.addAttribute("maxSystemMemory", maxSystemMemory);

    //
    Statistics statistics = (Statistics) model.asMap().get("statistics");
    String[] secondLevelCacheRegionNames = statistics.getSecondLevelCacheRegionNames();

    int totalMemorySize = 0;
    int totalMemoryCount = 0;
    int totalDiskCount = 0;

    for (String secondLevelCacheRegionName : secondLevelCacheRegionNames) {
        SecondLevelCacheStatistics secondLevelCacheStatistics = statistics
                .getSecondLevelCacheStatistics(secondLevelCacheRegionName);
        totalMemorySize += secondLevelCacheStatistics.getSizeInMemory();
        totalMemoryCount += secondLevelCacheStatistics.getElementCountInMemory();
        totalDiskCount += secondLevelCacheStatistics.getElementCountOnDisk();
    }

    model.addAttribute("totalMemorySize", totalMemorySize);
    model.addAttribute("totalMemoryCount", totalMemoryCount);
    model.addAttribute("totalDiskCount", totalDiskCount);
}

From source file:edu.uci.ics.hyracks.control.nc.NodeControllerService.java

public NodeControllerService(NCConfig ncConfig) throws Exception {
    this.ncConfig = ncConfig;
    id = ncConfig.nodeId;
    NodeControllerIPCI ipci = new NodeControllerIPCI();
    ipc = new IPCSystem(new InetSocketAddress(ncConfig.clusterNetIPAddress, ncConfig.clusterNetPort), ipci,
            new CCNCFunctions.SerializerDeserializer());

    this.ctx = new RootHyracksContext(this, new IOManager(getDevices(ncConfig.ioDevices)));
    if (id == null) {
        throw new Exception("id not set");
    }
    partitionManager = new PartitionManager(this);
    netManager = new NetworkManager(ncConfig.dataIPAddress, ncConfig.dataPort, partitionManager,
            ncConfig.nNetThreads, ncConfig.nNetBuffers, ncConfig.dataPublicIPAddress, ncConfig.dataPublicPort);

    lccm = new LifeCycleComponentManager();
    queue = new WorkQueue();
    jobletMap = new Hashtable<JobId, Joblet>();
    timer = new Timer(true);
    serverCtx = new ServerContext(ServerContext.ServerType.NODE_CONTROLLER,
            new File(new File(NodeControllerService.class.getName()), id));
    memoryMXBean = ManagementFactory.getMemoryMXBean();
    gcMXBeans = ManagementFactory.getGarbageCollectorMXBeans();
    threadMXBean = ManagementFactory.getThreadMXBean();
    runtimeMXBean = ManagementFactory.getRuntimeMXBean();
    osMXBean = ManagementFactory.getOperatingSystemMXBean();
    registrationPending = true;
    getNodeControllerInfosAcceptor = new MutableObject<FutureValue<Map<String, NodeControllerInfo>>>();
    memoryManager = new MemoryManager(
            (long) (memoryMXBean.getHeapMemoryUsage().getMax() * MEMORY_FUDGE_FACTOR));
    ioCounter = new IOCounterFactory().getIOCounter();
}

From source file:fr.inria.wimmics.coresetimer.CoreseTimer.java

private long getCurrentlyUsedMemory() {
    return ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed()
            + ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage().getUsed();
}

From source file:com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable.java

public TopologyMetricsRunnable(final NimbusData nimbusData) {
    setName(getClass().getSimpleName());

    this.nimbusData = nimbusData;

    this.localMode = nimbusData.isLocalMode();
    if (localMode) {
        this.metricStat = new AtomicIntegerArray(1);
        return;
    }

    LOG.info("create topology metrics runnable.");
    this.metricCache = nimbusData.getMetricCache();
    this.stormClusterState = nimbusData.getStormClusterState();
    this.isShutdown = nimbusData.getIsShutdown();

    clusterName = ConfigExtension.getClusterName(nimbusData.getConf());
    if (clusterName == null) {
        throw new RuntimeException("cluster.name property must be set in storm.yaml!");
    }

    this.maxPendingUploadMetrics = ConfigExtension.getMaxPendingMetricNum(nimbusData.getConf());
    this.metricStat = new AtomicIntegerArray(this.maxPendingUploadMetrics);

    int cnt = 0;
    for (int i = 0; i < maxPendingUploadMetrics; i++) {
        TopologyMetricDataInfo obj = getMetricDataInfoFromCache(i);
        if (obj != null) {
            this.metricStat.set(i, SET);
            cnt++;
        }
    }
    LOG.info("pending upload metrics: {}", cnt);

    // init alive topologies from zk
    this.refreshTopologies();
    this.refreshTopologiesThread = new AsyncLoopThread(new RefreshTopologiesThread());

    this.clusterMetricsUpdateExecutor = Executors.newSingleThreadScheduledExecutor();
    this.clusterMetricsUpdateExecutor.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            int secOffset = TimeUtils.secOffset();
            int offset = 55;
            if (secOffset < offset) {
                JStormUtils.sleepMs((offset - secOffset) * 1000);
            } else if (secOffset == offset) {
                // do nothing
            } else {
                JStormUtils.sleepMs((60 - secOffset + offset) * 1000);
            }

            LOG.info("cluster metrics force upload.");
            mergeAndUploadClusterMetrics();
        }
    }, 5, 60, TimeUnit.SECONDS);

    // track nimbus JVM heap
    JStormMetrics.registerWorkerGauge(JStormMetrics.NIMBUS_METRIC_KEY, MetricDef.MEMORY_USED,
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
                    MemoryUsage memoryUsage = memoryMXBean.getHeapMemoryUsage();
                    return (double) memoryUsage.getUsed();
                }
            }));
}