Example usage for java.lang.management ManagementFactory getMemoryMXBean

List of usage examples for java.lang.management ManagementFactory getMemoryMXBean

Introduction

On this page you can find example usage for java.lang.management ManagementFactory getMemoryMXBean.

Prototype

public static MemoryMXBean getMemoryMXBean() 

Document

Returns the managed bean for the memory system of the Java virtual machine.
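
Before the project-specific examples below, here is a minimal standalone sketch (not taken from any of the listed sources; class and variable names are illustrative only) showing the typical call pattern: obtain the MemoryMXBean once, then read heap and non-heap usage from it. Note that MemoryUsage.getMax() may return -1 when no maximum is defined.

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;

public class MemoryMXBeanExample {
    public static void main(String[] args) {
        // Obtain the platform MemoryMXBean once and reuse it.
        MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean();

        MemoryUsage heap = memoryBean.getHeapMemoryUsage();
        MemoryUsage nonHeap = memoryBean.getNonHeapMemoryUsage();

        // getMax() may be -1 if the maximum heap size is undefined.
        System.out.println("heap used (MB): " + heap.getUsed() / 1024 / 1024);
        System.out.println("heap max (MB): " + (heap.getMax() < 0 ? -1 : heap.getMax() / 1024 / 1024));
        System.out.println("non-heap used (MB): " + nonHeap.getUsed() / 1024 / 1024);
    }
}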

Usage

From source file:org.apache.hadoop.hbase.regionserver.HRegionServer.java

ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
    // We're getting the MetricsRegionServerWrapper here because the wrapper computes requests
    // per second, and other metrics. As long as metrics are part of ServerLoad it's best to use
    // the wrapper to compute those numbers in one place.
    // In the long term most of these should be moved off of ServerLoad and the heart beat.
    // Instead they should be stored in an HBase table so that external visibility into HBase is
    // improved; additionally, the load balancer will be able to take advantage of a more complete
    // history.
    MetricsRegionServerWrapper regionServerWrapper = this.metricsRegionServer.getRegionServerWrapper();
    Collection<HRegion> regions = getOnlineRegionsLocalContext();
    MemoryUsage memory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();

    ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder();
    serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond());
    serverLoad.setTotalNumberOfRequests((int) regionServerWrapper.getTotalRequestCount());
    serverLoad.setUsedHeapMB((int) (memory.getUsed() / 1024 / 1024));
    serverLoad.setMaxHeapMB((int) (memory.getMax() / 1024 / 1024));
    Set<String> coprocessors = this.hlog.getCoprocessorHost().getCoprocessors();
    for (String coprocessor : coprocessors) {
        serverLoad.addCoprocessors(Coprocessor.newBuilder().setName(coprocessor).build());
    }
    RegionLoad.Builder regionLoadBldr = RegionLoad.newBuilder();
    RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder();
    for (HRegion region : regions) {
        serverLoad.addRegionLoads(createRegionLoad(region, regionLoadBldr, regionSpecifier));
    }
    serverLoad.setReportStartTime(reportStartTime);
    serverLoad.setReportEndTime(reportEndTime);
    if (this.infoServer != null) {
        serverLoad.setInfoServerPort(this.infoServer.getPort());
    } else {
        serverLoad.setInfoServerPort(-1);
    }
    return serverLoad.build();
}

From source file:org.apache.hadoop.hbase.regionserver.MemcacheFlusher.java

/**
 * @param conf
 * @param server
 */
public MemcacheFlusher(final HBaseConfiguration conf, final HRegionServer server) {
    super();
    this.server = server;
    this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
    long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
    this.globalMemcacheLimit = globalMemcacheLimit(max, DEFAULT_UPPER, UPPER_KEY, conf);
    long lower = globalMemcacheLimit(max, DEFAULT_LOWER, LOWER_KEY, conf);
    if (lower > this.globalMemcacheLimit) {
        lower = this.globalMemcacheLimit;
        LOG.info("Setting globalMemcacheLimitLowMark == globalMemcacheLimit " + "because supplied " + LOWER_KEY
                + " was > " + UPPER_KEY);
    }
    this.globalMemcacheLimitLowMark = lower;
    this.blockingStoreFilesNumber = conf.getInt("hbase.hstore.blockingStoreFiles", -1);
    if (this.blockingStoreFilesNumber == -1) {
        this.blockingStoreFilesNumber = 1 + conf.getInt("hbase.hstore.compactionThreshold", 3);
    }
    this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime", 90000); // default of 90 seconds
    LOG.info("globalMemcacheLimit=" + StringUtils.humanReadableInt(this.globalMemcacheLimit)
            + ", globalMemcacheLimitLowMark=" + StringUtils.humanReadableInt(this.globalMemcacheLimitLowMark)
            + ", maxHeap=" + StringUtils.humanReadableInt(max));
}

From source file:org.apache.hadoop.hbase.regionserver.MemStoreChunkPool.java

/**
 * @param conf
 * @return the global MemStoreChunkPool instance
 */
static MemStoreChunkPool getPool(Configuration conf) {
    if (globalInstance != null)
        return globalInstance;
    if (chunkPoolDisabled)
        return null;

    synchronized (MemStoreChunkPool.class) {
        if (globalInstance != null)
            return globalInstance;
        float poolSizePercentage = conf.getFloat(CHUNK_POOL_MAXSIZE_KEY, POOL_MAX_SIZE_DEFAULT);
        if (poolSizePercentage <= 0) {
            chunkPoolDisabled = true;
            return null;
        }
        if (poolSizePercentage > 1.0) {
            throw new IllegalArgumentException(CHUNK_POOL_MAXSIZE_KEY + " must be between 0.0 and 1.0");
        }
        long heapMax = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
        long globalMemStoreLimit = (long) (heapMax * MemStoreFlusher.getGlobalMemStorePercent(conf));
        int chunkSize = conf.getInt(HeapMemStoreLAB.CHUNK_SIZE_KEY, HeapMemStoreLAB.CHUNK_SIZE_DEFAULT);
        int maxCount = (int) (globalMemStoreLimit * poolSizePercentage / chunkSize);

        float initialCountPercentage = conf.getFloat(CHUNK_POOL_INITIALSIZE_KEY, POOL_INITIAL_SIZE_DEFAULT);
        if (initialCountPercentage > 1.0 || initialCountPercentage < 0) {
            throw new IllegalArgumentException(CHUNK_POOL_INITIALSIZE_KEY + " must be between 0.0 and 1.0");
        }

        int initialCount = (int) (initialCountPercentage * maxCount);
        LOG.info("Allocating MemStoreChunkPool with chunk size " + StringUtils.byteDesc(chunkSize)
                + ", max count " + maxCount + ", initial count " + initialCount);
        globalInstance = new MemStoreChunkPool(conf, chunkSize, maxCount, initialCount);
        return globalInstance;
    }
}

From source file:org.apache.hadoop.hbase.regionserver.MemStoreFlusher.java

/**
 * @param conf
 * @param server
 */
public MemStoreFlusher(final Configuration conf, final HRegionServer server) {
    super();
    this.server = server;
    this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
    long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
    float globalMemStorePercent = getGlobalMemStorePercent(conf);
    this.globalMemStoreLimit = (long) (max * globalMemStorePercent);
    this.globalMemStoreLimitLowMarkPercent = getGlobalMemStoreLowerMark(conf, globalMemStorePercent);
    this.globalMemStoreLimitLowMark = (long) (this.globalMemStoreLimit
            * this.globalMemStoreLimitLowMarkPercent);

    this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime", 90000);
    int handlerCount = conf.getInt("hbase.hstore.flusher.count", 2);
    this.flushHandlers = new FlushHandler[handlerCount];
    LOG.info("globalMemStoreLimit=" + StringUtils.humanReadableInt(this.globalMemStoreLimit)
            + ", globalMemStoreLimitLowMark=" + StringUtils.humanReadableInt(this.globalMemStoreLimitLowMark)
            + ", maxHeap=" + StringUtils.humanReadableInt(max));
}

From source file:org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics.java

@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb = Strings.appendKeyValue(sb, "requestsPerSecond",
            Integer.valueOf((int) this.requests.getPreviousIntervalValue()));
    sb = Strings.appendKeyValue(sb, "numberOfOnlineRegions", Integer.valueOf(this.regions.get()));
    sb = Strings.appendKeyValue(sb, "numberOfStores", Integer.valueOf(this.stores.get()));
    sb = Strings.appendKeyValue(sb, "numberOfStorefiles", Integer.valueOf(this.storefiles.get()));
    sb = Strings.appendKeyValue(sb, this.storefileIndexSizeMB.getName(),
            Integer.valueOf(this.storefileIndexSizeMB.get()));
    sb = Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB.get()));
    sb = Strings.appendKeyValue(sb, "totalStaticIndexSizeKB",
            Integer.valueOf(this.totalStaticIndexSizeKB.get()));
    sb = Strings.appendKeyValue(sb, "totalStaticBloomSizeKB",
            Integer.valueOf(this.totalStaticBloomSizeKB.get()));
    sb = Strings.appendKeyValue(sb, this.memstoreSizeMB.getName(), Integer.valueOf(this.memstoreSizeMB.get()));
    sb = Strings.appendKeyValue(sb, "mbInMemoryWithoutWAL", Integer.valueOf(this.mbInMemoryWithoutWAL.get()));
    sb = Strings.appendKeyValue(sb, "numberOfPutsWithoutWAL", Long.valueOf(this.numPutsWithoutWAL.get()));
    sb = Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount.get()));
    sb = Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount.get()));
    sb = Strings.appendKeyValue(sb, "compactionQueueSize", Integer.valueOf(this.compactionQueueSize.get()));
    sb = Strings.appendKeyValue(sb, "flushQueueSize", Integer.valueOf(this.flushQueueSize.get()));
    // Duplicate from jvmmetrics because metrics are private there so
    // inaccessible.
    MemoryUsage memory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    sb = Strings.appendKeyValue(sb, "usedHeapMB", Long.valueOf(memory.getUsed() / MB));
    sb = Strings.appendKeyValue(sb, "maxHeapMB", Long.valueOf(memory.getMax() / MB));
    sb = Strings.appendKeyValue(sb, this.blockCacheSize.getName() + "MB",
            StringUtils.limitDecimalTo2((float) this.blockCacheSize.get() / MB));
    sb = Strings.appendKeyValue(sb, this.blockCacheFree.getName() + "MB",
            StringUtils.limitDecimalTo2((float) this.blockCacheFree.get() / MB));
    sb = Strings.appendKeyValue(sb, this.blockCacheCount.getName(), Long.valueOf(this.blockCacheCount.get()));
    sb = Strings.appendKeyValue(sb, this.blockCacheHitCount.getName(),
            Long.valueOf(this.blockCacheHitCount.get()));
    sb = Strings.appendKeyValue(sb, this.blockCacheMissCount.getName(),
            Long.valueOf(this.blockCacheMissCount.get()));
    sb = Strings.appendKeyValue(sb, this.blockCacheEvictedCount.getName(),
            Long.valueOf(this.blockCacheEvictedCount.get()));
    sb = Strings.appendKeyValue(sb, this.blockCacheHitRatio.getName(),
            Long.valueOf(this.blockCacheHitRatio.get()) + "%");
    sb = Strings.appendKeyValue(sb, this.blockCacheHitCachingRatio.getName(),
            Long.valueOf(this.blockCacheHitCachingRatio.get()) + "%");
    sb = Strings.appendKeyValue(sb, this.hdfsBlocksLocalityIndex.getName(),
            Long.valueOf(this.hdfsBlocksLocalityIndex.get()));
    sb = Strings.appendKeyValue(sb, "slowHLogAppendCount", Long.valueOf(this.slowHLogAppendCount.get()));
    sb = appendHistogram(sb, this.fsReadLatencyHistogram);
    sb = appendHistogram(sb, this.fsPreadLatencyHistogram);
    sb = appendHistogram(sb, this.fsWriteLatencyHistogram);

    return sb.toString();
}

From source file:org.apache.hadoop.hbase.regionserver.TestCompactingMemStore.java

/**
 * Test a pathological pattern that shows why we can't currently
 * use the MSLAB for upsert workloads. This test inserts data
 * in the following pattern:
 *
 * - row0001 through row1000 (fills up one 2M Chunk)
 * - row0002 through row1001 (fills up another 2M chunk, leaves one reference
 *   to the first chunk)
 * - row0003 through row1002 (another chunk, another dangling reference)
 *
 * This causes OOME pretty quickly if we use MSLAB for upsert
 * since each 2M chunk is held onto by a single reference.
 */
@Override
@Test
public void testUpsertMSLAB() throws Exception {

    int ROW_SIZE = 2048;
    byte[] qualifier = new byte[ROW_SIZE - 4];

    MemoryMXBean bean = ManagementFactory.getMemoryMXBean();
    for (int i = 0; i < 3; i++) {
        System.gc();
    }
    long usageBefore = bean.getHeapMemoryUsage().getUsed();

    long size = 0;
    long ts = 0;

    for (int newValue = 0; newValue < 1000; newValue++) {
        for (int row = newValue; row < newValue + 1000; row++) {
            byte[] rowBytes = Bytes.toBytes(row);
            size += memstore.updateColumnValue(rowBytes, FAMILY, qualifier, newValue, ++ts);
        }
    }
    System.out.println("Wrote " + ts + " vals");
    for (int i = 0; i < 3; i++) {
        System.gc();
    }
    long usageAfter = bean.getHeapMemoryUsage().getUsed();
    System.out.println("Memory used: " + (usageAfter - usageBefore) + " (heapsize: " + memstore.heapSize()
            + " size: " + size + ")");
}

From source file:org.apache.hadoop.hbase.regionserver.TestDefaultMemStore.java

/**
 * Test a pathological pattern that shows why we can't currently
 * use the MSLAB for upsert workloads. This test inserts data
 * in the following pattern:
 *
 * - row0001 through row1000 (fills up one 2M Chunk)
 * - row0002 through row1001 (fills up another 2M chunk, leaves one reference
 *   to the first chunk)
 * - row0003 through row1002 (another chunk, another dangling reference)
 *
 * This causes OOME pretty quickly if we use MSLAB for upsert
 * since each 2M chunk is held onto by a single reference.
 */
public void testUpsertMSLAB() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(DefaultMemStore.USEMSLAB_KEY, true);
    memstore = new DefaultMemStore(conf, KeyValue.COMPARATOR);

    int ROW_SIZE = 2048;
    byte[] qualifier = new byte[ROW_SIZE - 4];

    MemoryMXBean bean = ManagementFactory.getMemoryMXBean();
    for (int i = 0; i < 3; i++) {
        System.gc();
    }
    long usageBefore = bean.getHeapMemoryUsage().getUsed();

    long size = 0;
    long ts = 0;

    for (int newValue = 0; newValue < 1000; newValue++) {
        for (int row = newValue; row < newValue + 1000; row++) {
            byte[] rowBytes = Bytes.toBytes(row);
            size += memstore.updateColumnValue(rowBytes, FAMILY, qualifier, newValue, ++ts);
        }
    }
    System.out.println("Wrote " + ts + " vals");
    for (int i = 0; i < 3; i++) {
        System.gc();
    }
    long usageAfter = bean.getHeapMemoryUsage().getUsed();
    System.out.println("Memory used: " + (usageAfter - usageBefore) + " (heapsize: " + memstore.heapSize()
            + " size: " + size + ")");
}

From source file:org.apache.hadoop.hbase.regionserver.TestMemStore.java

/**
 * Test a pathological pattern that shows why we can't currently
 * use the MSLAB for upsert workloads. This test inserts data
 * in the following pattern:
 * 
 * - row0001 through row1000 (fills up one 2M Chunk)
 * - row0002 through row1001 (fills up another 2M chunk, leaves one reference
 *   to the first chunk)
 * - row0003 through row1002 (another chunk, another dangling reference)
 * 
 * This causes OOME pretty quickly if we use MSLAB for upsert
 * since each 2M chunk is held onto by a single reference.
 */
public void testUpsertMSLAB() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(MemStore.USEMSLAB_KEY, true);
    memstore = new MemStore(conf, KeyValue.COMPARATOR);

    int ROW_SIZE = 2048;
    byte[] qualifier = new byte[ROW_SIZE - 4];

    MemoryMXBean bean = ManagementFactory.getMemoryMXBean();
    for (int i = 0; i < 3; i++) {
        System.gc();
    }
    long usageBefore = bean.getHeapMemoryUsage().getUsed();

    long size = 0;
    long ts = 0;

    for (int newValue = 0; newValue < 1000; newValue++) {
        for (int row = newValue; row < newValue + 1000; row++) {
            byte[] rowBytes = Bytes.toBytes(row);
            size += memstore.updateColumnValue(rowBytes, FAMILY, qualifier, newValue, ++ts);
        }
    }
    System.out.println("Wrote " + ts + " vals");
    for (int i = 0; i < 3; i++) {
        System.gc();
    }
    long usageAfter = bean.getHeapMemoryUsage().getUsed();
    System.out.println("Memory used: " + (usageAfter - usageBefore) + " (heapsize: " + memstore.heapSize()
            + " size: " + size + ")");
}

From source file:org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.java

private int calculateMaxLogFiles(float memstoreSizeRatio, long logRollSize) {
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    return Math.round(mu.getMax() * memstoreSizeRatio * 2 / logRollSize);
}

From source file:org.apache.hadoop.hive.ql.exec.ExecDriver.java

public static void main(String[] args) throws IOException, HiveException {

    String planFileName = null;
    ArrayList<String> jobConfArgs = new ArrayList<String>();
    boolean noLog = false;
    String files = null;
    boolean localtask = false;
    try {
        for (int i = 0; i < args.length; i++) {
            if (args[i].equals("-plan")) {
                planFileName = args[++i];
            } else if (args[i].equals("-jobconf")) {
                jobConfArgs.add(args[++i]);
            } else if (args[i].equals("-nolog")) {
                noLog = true;
            } else if (args[i].equals("-files")) {
                files = args[++i];
            } else if (args[i].equals("-localtask")) {
                localtask = true;
            }
        }
    } catch (IndexOutOfBoundsException e) {
        System.err.println("Missing argument to option");
        printUsage();
    }

    JobConf conf;
    if (localtask) {
        conf = new JobConf(MapredLocalTask.class);
    } else {
        conf = new JobConf(ExecDriver.class);
    }
    StringBuilder sb = new StringBuilder("JobConf:\n");

    for (String one : jobConfArgs) {
        int eqIndex = one.indexOf('=');
        if (eqIndex != -1) {
            try {
                String key = one.substring(0, eqIndex);
                String value = URLDecoder.decode(one.substring(eqIndex + 1), "UTF-8");
                conf.set(key, value);
                sb.append(key).append("=").append(value).append("\n");
            } catch (UnsupportedEncodingException e) {
                System.err.println(
                        "Unexpected error " + e.getMessage() + " while encoding " + one.substring(eqIndex + 1));
                System.exit(3);
            }
        }
    }

    if (files != null) {
        conf.set("tmpfiles", files);
    }

    boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT);

    if (noLog) {
        // If started from main(), and noLog is on, we should not output
        // any logs. To turn the log on, please set -Dtest.silent=false
        BasicConfigurator.resetConfiguration();
        BasicConfigurator.configure(new NullAppender());
    } else {
        setupChildLog4j(conf);
    }

    Log LOG = LogFactory.getLog(ExecDriver.class.getName());
    LogHelper console = new LogHelper(LOG, isSilent);

    if (planFileName == null) {
        console.printError("Must specify Plan File Name");
        printUsage();
    }

    // print out the location of the log file for the user so
    // that it's easy to find reason for local mode execution failures
    for (Appender appender : Collections
            .list((Enumeration<Appender>) LogManager.getRootLogger().getAllAppenders())) {
        if (appender instanceof FileAppender) {
            console.printInfo("Execution log at: " + ((FileAppender) appender).getFile());
        }
    }

    // log the list of job conf parameters for reference
    LOG.info(sb.toString());

    // the plan file should always be in local directory
    Path p = new Path(planFileName);
    FileSystem fs = FileSystem.getLocal(conf);
    InputStream pathData = fs.open(p);

    // this is workaround for hadoop-17 - libjars are not added to classpath of the
    // child process. so we add it here explicitly

    String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS);
    String addedJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEADDEDJARS);
    try {
        // see also - code in CliDriver.java
        ClassLoader loader = conf.getClassLoader();
        if (StringUtils.isNotBlank(auxJars)) {
            loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ","));
        }
        if (StringUtils.isNotBlank(addedJars)) {
            loader = Utilities.addToClassPath(loader, StringUtils.split(addedJars, ","));
        }
        conf.setClassLoader(loader);
        // Also set this to the Thread ContextClassLoader, so new threads will
        // inherit
        // this class loader, and propagate into newly created Configurations by
        // those
        // new threads.
        Thread.currentThread().setContextClassLoader(loader);
    } catch (Exception e) {
        throw new HiveException(e.getMessage(), e);
    }
    int ret;
    if (localtask) {
        memoryMXBean = ManagementFactory.getMemoryMXBean();
        MapredLocalWork plan = Utilities.deserializeMapRedLocalWork(pathData, conf);
        MapredLocalTask ed = new MapredLocalTask(plan, conf, isSilent);
        ret = ed.executeFromChildJVM(new DriverContext());

    } else {
        MapredWork plan = Utilities.deserializeMapRedWork(pathData, conf);
        ExecDriver ed = new ExecDriver(plan, conf, isSilent);
        ret = ed.execute(new DriverContext());
    }

    if (ret != 0) {
        System.exit(2);
    }
}