Usage examples for java.lang.management.ManagementFactory.getMemoryMXBean()
public static MemoryMXBean getMemoryMXBean()
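The examples below are drawn from open-source projects. As a baseline, here is a minimal, self-contained sketch of the call itself (the class name MemoryMXBeanBasics and the printed format are illustrative, not from any project below):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;

public class MemoryMXBeanBasics {
    public static void main(String[] args) {
        MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean();
        MemoryUsage heap = memoryBean.getHeapMemoryUsage();
        MemoryUsage nonHeap = memoryBean.getNonHeapMemoryUsage();
        // Values are in bytes; getMax() returns -1 when the maximum is undefined.
        System.out.println("heap used=" + heap.getUsed()
                + " committed=" + heap.getCommitted()
                + " max=" + heap.getMax());
        System.out.println("non-heap used=" + nonHeap.getUsed()
                + " committed=" + nonHeap.getCommitted()
                + " max=" + nonHeap.getMax());
    }
}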
From source file:no.joachimhs.server.JettyServer.java
private static void start() throws Exception {
    Long bytes = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
    if (bytes != null && bytes.longValue() > 0L) {
        megabytes = bytes / (1024 * 1024);
    }
    log.info("Configured max HEAP memory: " + megabytes + " MB.");
    if (megabytes.longValue() < 100) {
        log.severe("EurekaJ requires at least 100MB of HEAP to operate. Please set your -Xmx option accordingly (minimum -Xmx128m).");
        System.exit(-1);
    }
    if (jettyServer != null && jettyServer.isRunning()) {
        log.warning("JettyServer.start() called, but the server is already started.");
        return;
    }
    configure();
    jettyServer.start();
    localPort = jettyServer.getConnectors()[0].getLocalPort();
    log.info("JettyServer started at http://localhost:" + localPort + "/");
}
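A note on this example: getHeapMemoryUsage().getMax() returns a primitive long, so the autoboxed Long above can never actually be null; the case to guard against is -1, which the JVM reports when no maximum is defined. A minimal sketch of that check (the class HeapCheck and method checkHeap are illustrative):

import java.lang.management.ManagementFactory;

public class HeapCheck {
    static void checkHeap(long requiredMb) {
        long maxBytes = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
        if (maxBytes < 0) {
            // -1 means the maximum heap size is undefined for this JVM.
            System.out.println("Max heap size is undefined; skipping check.");
            return;
        }
        long maxMb = maxBytes / (1024 * 1024);
        if (maxMb < requiredMb) {
            throw new IllegalStateException("Need at least " + requiredMb + " MB of heap, have " + maxMb);
        }
    }

    public static void main(String[] args) {
        checkHeap(100);
    }
}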
From source file:org.apache.flink.runtime.metrics.util.MetricUtils.java
private static void instantiateMemoryMetrics(MetricGroup metrics) {
    final MemoryMXBean mxBean = ManagementFactory.getMemoryMXBean();

    MetricGroup heap = metrics.addGroup("Heap");
    heap.gauge("Used", new Gauge<Long>() {
        @Override
        public Long getValue() {
            return mxBean.getHeapMemoryUsage().getUsed();
        }
    });
    heap.gauge("Committed", new Gauge<Long>() {
        @Override
        public Long getValue() {
            return mxBean.getHeapMemoryUsage().getCommitted();
        }
    });
    heap.gauge("Max", new Gauge<Long>() {
        @Override
        public Long getValue() {
            return mxBean.getHeapMemoryUsage().getMax();
        }
    });

    MetricGroup nonHeap = metrics.addGroup("NonHeap");
    nonHeap.gauge("Used", new Gauge<Long>() {
        @Override
        public Long getValue() {
            return mxBean.getNonHeapMemoryUsage().getUsed();
        }
    });
    nonHeap.gauge("Committed", new Gauge<Long>() {
        @Override
        public Long getValue() {
            return mxBean.getNonHeapMemoryUsage().getCommitted();
        }
    });
    nonHeap.gauge("Max", new Gauge<Long>() {
        @Override
        public Long getValue() {
            return mxBean.getNonHeapMemoryUsage().getMax();
        }
    });

    List<BufferPoolMXBean> bufferMxBeans = ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class);
    for (final BufferPoolMXBean bufferMxBean : bufferMxBeans) {
        MetricGroup bufferGroup = metrics.addGroup(WordUtils.capitalize(bufferMxBean.getName()));
        bufferGroup.gauge("Count", new Gauge<Long>() {
            @Override
            public Long getValue() {
                return bufferMxBean.getCount();
            }
        });
        bufferGroup.gauge("MemoryUsed", new Gauge<Long>() {
            @Override
            public Long getValue() {
                return bufferMxBean.getMemoryUsed();
            }
        });
        bufferGroup.gauge("TotalCapacity", new Gauge<Long>() {
            @Override
            public Long getValue() {
                return bufferMxBean.getTotalCapacity();
            }
        });
    }
}
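The gauges above call getHeapMemoryUsage() inside getValue() because each call returns a fresh MemoryUsage snapshot; caching one snapshot would freeze the metric. The buffer-pool beans used at the end are plain platform MXBeans and can be listed without Flink. A minimal sketch (class name BufferPools is illustrative):

import java.lang.management.BufferPoolMXBean;
import java.lang.management.ManagementFactory;

public class BufferPools {
    public static void main(String[] args) {
        // Typically reports the "direct" and "mapped" NIO buffer pools.
        for (BufferPoolMXBean pool : ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class)) {
            System.out.println(pool.getName() + ": count=" + pool.getCount()
                    + " memoryUsed=" + pool.getMemoryUsed()
                    + " totalCapacity=" + pool.getTotalCapacity());
        }
    }
}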
From source file:org.apache.flink.runtime.taskmanager.TaskManager.java
public TaskManager(ExecutionMode executionMode, JobManagerProtocol jobManager,
        InputSplitProviderProtocol splitProvider, ChannelLookupProtocol channelLookup,
        AccumulatorProtocol accumulators, InetSocketAddress jobManagerAddress,
        InetAddress taskManagerBindAddress) throws Exception {

    if (executionMode == null || jobManager == null || splitProvider == null || channelLookup == null
            || accumulators == null) {
        throw new NullPointerException();
    }

    LOG.info("TaskManager execution mode: " + executionMode);

    this.executionMode = executionMode;
    this.jobManager = jobManager;
    this.lookupService = channelLookup;
    this.globalInputSplitProvider = splitProvider;
    this.accumulatorProtocolProxy = accumulators;

    // initialize the number of slots
    {
        int slots = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, -1);
        if (slots == -1) {
            slots = 1;
            LOG.info("Number of task slots not configured. Creating one task slot.");
        } else if (slots <= 0) {
            throw new Exception("Illegal value for the number of task slots: " + slots);
        } else {
            LOG.info("Creating " + slots + " task slot(s).");
        }
        this.numberOfSlots = slots;
    }

    int ipcPort = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_IPC_PORT_KEY, -1);
    int dataPort = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY, -1);
    if (ipcPort == -1) {
        ipcPort = getAvailablePort();
    }
    if (dataPort == -1) {
        dataPort = getAvailablePort();
    }

    this.localInstanceConnectionInfo = new InstanceConnectionInfo(taskManagerBindAddress, ipcPort, dataPort);
    LOG.info("TaskManager connection information:" + this.localInstanceConnectionInfo);

    // Start the local RPC server, giving it as many handler threads as we have slots
    try {
        // some magic number for the handler threads
        final int numHandlers = Math.min(numberOfSlots, 2 * Hardware.getNumberCPUCores());
        this.taskManagerServer = RPC.getServer(this, taskManagerBindAddress.getHostAddress(), ipcPort,
                numHandlers);
        this.taskManagerServer.start();
    } catch (IOException e) {
        LOG.error("Failed to start TaskManager server. " + e.getMessage(), e);
        throw new Exception("Failed to start taskmanager server. " + e.getMessage(), e);
    }

    // Load profiler if it should be used
    if (GlobalConfiguration.getBoolean(ProfilingUtils.ENABLE_PROFILING_KEY, false)) {
        final String profilerClassName = GlobalConfiguration.getString(ProfilingUtils.TASKMANAGER_CLASSNAME_KEY,
                "org.apache.flink.runtime.profiling.impl.TaskManagerProfilerImpl");
        this.profiler = ProfilingUtils.loadTaskManagerProfiler(profilerClassName, jobManagerAddress.getAddress(),
                this.localInstanceConnectionInfo);
        if (this.profiler == null) {
            LOG.error("Cannot find class name for the profiler.");
        } else {
            LOG.info("Profiling of jobs is enabled.");
        }
    } else {
        this.profiler = null;
        LOG.info("Profiling of jobs is disabled.");
    }

    // Get the directory for storing temporary files
    final String[] tmpDirPaths = GlobalConfiguration
            .getString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, ConfigConstants.DEFAULT_TASK_MANAGER_TMP_PATH)
            .split(",|" + File.pathSeparator);
    checkTempDirs(tmpDirPaths);

    int numBuffers = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NETWORK_NUM_BUFFERS_KEY,
            ConfigConstants.DEFAULT_TASK_MANAGER_NETWORK_NUM_BUFFERS);
    int bufferSize = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NETWORK_BUFFER_SIZE_KEY,
            ConfigConstants.DEFAULT_TASK_MANAGER_NETWORK_BUFFER_SIZE);

    // Initialize the channel manager
    try {
        NetworkConnectionManager networkConnectionManager = null;
        switch (executionMode) {
        case LOCAL:
            networkConnectionManager = new LocalConnectionManager();
            break;
        case CLUSTER:
            int numInThreads = GlobalConfiguration.getInteger(
                    ConfigConstants.TASK_MANAGER_NET_NUM_IN_THREADS_KEY,
                    ConfigConstants.DEFAULT_TASK_MANAGER_NET_NUM_IN_THREADS);
            int numOutThreads = GlobalConfiguration.getInteger(
                    ConfigConstants.TASK_MANAGER_NET_NUM_OUT_THREADS_KEY,
                    ConfigConstants.DEFAULT_TASK_MANAGER_NET_NUM_OUT_THREADS);
            int lowWaterMark = GlobalConfiguration.getInteger(
                    ConfigConstants.TASK_MANAGER_NET_NETTY_LOW_WATER_MARK,
                    ConfigConstants.DEFAULT_TASK_MANAGER_NET_NETTY_LOW_WATER_MARK);
            int highWaterMark = GlobalConfiguration.getInteger(
                    ConfigConstants.TASK_MANAGER_NET_NETTY_HIGH_WATER_MARK,
                    ConfigConstants.DEFAULT_TASK_MANAGER_NET_NETTY_HIGH_WATER_MARK);
            networkConnectionManager = new NettyConnectionManager(localInstanceConnectionInfo.address(),
                    localInstanceConnectionInfo.dataPort(), bufferSize, numInThreads, numOutThreads,
                    lowWaterMark, highWaterMark);
            break;
        }
        channelManager = new ChannelManager(lookupService, localInstanceConnectionInfo, numBuffers, bufferSize,
                networkConnectionManager);
    } catch (IOException ioe) {
        LOG.error(StringUtils.stringifyException(ioe));
        throw new Exception("Failed to instantiate ChannelManager.", ioe);
    }

    // initialize the memory manager
    {
        // Check whether the memory size has been explicitly configured.
        final long configuredMemorySize = GlobalConfiguration
                .getInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, -1);
        final long memorySize;

        if (configuredMemorySize == -1) {
            // no manually configured memory; take a relative fraction of the free heap space
            float fraction = GlobalConfiguration.getFloat(ConfigConstants.TASK_MANAGER_MEMORY_FRACTION_KEY,
                    ConfigConstants.DEFAULT_MEMORY_MANAGER_MEMORY_FRACTION);
            memorySize = (long) (EnvironmentInformation.getSizeOfFreeHeapMemoryWithDefrag() * fraction);
            LOG.info("Using " + fraction + " of the free heap space for managed memory.");
        } else if (configuredMemorySize <= 0) {
            throw new Exception("Invalid value for Memory Manager memory size: " + configuredMemorySize);
        } else {
            memorySize = configuredMemorySize << 20;
        }

        final int pageSize = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NETWORK_BUFFER_SIZE_KEY,
                ConfigConstants.DEFAULT_TASK_MANAGER_NETWORK_BUFFER_SIZE);

        // Initialize the memory manager
        LOG.info("Initializing memory manager with " + (memorySize >>> 20) + " megabytes of memory. "
                + "Page size is " + pageSize + " bytes.");

        try {
            @SuppressWarnings("unused")
            final boolean lazyAllocation = GlobalConfiguration.getBoolean(
                    ConfigConstants.TASK_MANAGER_MEMORY_LAZY_ALLOCATION_KEY,
                    ConfigConstants.DEFAULT_TASK_MANAGER_MEMORY_LAZY_ALLOCATION);

            this.memoryManager = new DefaultMemoryManager(memorySize, this.numberOfSlots, pageSize);
        } catch (Throwable t) {
            LOG.error("Unable to initialize memory manager with " + (memorySize >>> 20)
                    + " megabytes of memory.", t);
            throw new Exception("Unable to initialize memory manager.", t);
        }
    }

    this.hardwareDescription = HardwareDescription.extractFromSystem(this.memoryManager.getMemorySize());

    // Determine the port of the BLOB server and register it with the library cache manager
    {
        final int blobPort = this.jobManager.getBlobServerPort();
        if (blobPort == -1) {
            LOG.warn("Unable to determine BLOB server address: User library download will not be available");
            this.libraryCacheManager = new FallbackLibraryCacheManager();
        } else {
            final InetSocketAddress blobServerAddress = new InetSocketAddress(jobManagerAddress.getAddress(),
                    blobPort);
            LOG.info("Determined BLOB server address to be " + blobServerAddress);
            this.libraryCacheManager = new BlobLibraryCacheManager(new BlobCache(blobServerAddress),
                    GlobalConfiguration.getConfiguration());
        }
    }

    this.ioManager = new IOManagerAsync(tmpDirPaths);

    // start the heart beats
    {
        final long interval = GlobalConfiguration.getInteger(
                ConfigConstants.TASK_MANAGER_HEARTBEAT_INTERVAL_KEY,
                ConfigConstants.DEFAULT_TASK_MANAGER_HEARTBEAT_INTERVAL);

        this.heartbeatThread = new Thread() {
            @Override
            public void run() {
                registerAndRunHeartbeatLoop(interval, MAX_LOST_HEART_BEATS);
            }
        };
        this.heartbeatThread.setName("Heartbeat Thread");
        this.heartbeatThread.start();
    }

    // --------------------------------------------------------------------
    //  Memory Usage
    // --------------------------------------------------------------------

    final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
    final List<GarbageCollectorMXBean> gcMXBeans = ManagementFactory.getGarbageCollectorMXBeans();

    LOG.info(getMemoryUsageStatsAsString(memoryMXBean));

    boolean startMemoryUsageLogThread = GlobalConfiguration.getBoolean(
            ConfigConstants.TASK_MANAGER_DEBUG_MEMORY_USAGE_START_LOG_THREAD,
            ConfigConstants.DEFAULT_TASK_MANAGER_DEBUG_MEMORY_USAGE_START_LOG_THREAD);

    if (startMemoryUsageLogThread) {
        final int logIntervalMs = GlobalConfiguration.getInteger(
                ConfigConstants.TASK_MANAGER_DEBUG_MEMORY_USAGE_LOG_INTERVAL_MS,
                ConfigConstants.DEFAULT_TASK_MANAGER_DEBUG_MEMORY_USAGE_LOG_INTERVAL_MS);

        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    while (!isShutDown()) {
                        Thread.sleep(logIntervalMs);
                        LOG.info(getMemoryUsageStatsAsString(memoryMXBean));
                        LOG.info(getGarbageCollectorStatsAsString(gcMXBeans));
                    }
                } catch (InterruptedException e) {
                    LOG.warn("Unexpected interruption of memory usage logger thread.");
                }
            }
        }).start();
    }
}
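The memory-usage logger at the end of this constructor reduces to a small standalone pattern: poll the shared MemoryMXBean and GarbageCollectorMXBeans on an interval from a daemon thread. A minimal sketch of just that piece (the class name, interval, and output format are illustrative):

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.util.List;

public class MemoryUsageLogger {
    public static void main(String[] args) throws InterruptedException {
        final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
        final List<GarbageCollectorMXBean> gcMXBeans = ManagementFactory.getGarbageCollectorMXBeans();
        Thread logger = new Thread(() -> {
            try {
                while (true) {
                    Thread.sleep(5000);
                    System.out.println("heap: " + memoryMXBean.getHeapMemoryUsage());
                    for (GarbageCollectorMXBean gc : gcMXBeans) {
                        System.out.println(gc.getName() + ": count=" + gc.getCollectionCount()
                                + " timeMs=" + gc.getCollectionTime());
                    }
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        logger.setDaemon(true); // the logger must not keep the JVM alive
        logger.start();
        Thread.sleep(20000);    // stand-in for real application work
    }
}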
From source file:org.apache.hadoop.gateway.websockets.WebsocketMultipleConnectionTest.java
/**
 * Test websocket proxying through gateway.
 *
 * @throws Exception
 */
@Test
public void testMultipleConnections() throws Exception {
    WebSocketContainer container = ContainerProvider.getWebSocketContainer();

    final CountDownLatch latch = new CountDownLatch(MAX_CONNECTIONS);
    Session[] sessions = new Session[MAX_CONNECTIONS];

    MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
    System.gc();

    final long heapt1 = memoryMXBean.getHeapMemoryUsage().getUsed();
    final long nonHeapt1 = memoryMXBean.getNonHeapMemoryUsage().getUsed();

    for (int i = 0; i < MAX_CONNECTIONS; i++) {
        sessions[i] = container.connectToServer(new WebsocketClient() {
            @Override
            public void onMessage(String message) {
                latch.countDown();
            }
        }, new URI(serverUri.toString() + "gateway/websocket/ws"));
    }

    for (int i = 0; i < MAX_CONNECTIONS; i++) {
        /* make sure the session is active and valid before trying to connect */
        if (sessions[i].isOpen() && sessions[i].getBasicRemote() != null) {
            sessions[i].getBasicRemote().sendText("OK");
        }
    }

    latch.await(5 * MAX_CONNECTIONS, TimeUnit.MILLISECONDS);

    System.gc();
    final long heapUsed = memoryMXBean.getHeapMemoryUsage().getUsed() - heapt1;
    final long nonHeapUsed = memoryMXBean.getNonHeapMemoryUsage().getUsed() - nonHeapt1;

    System.out.println("heapUsed = " + heapUsed);
    System.out.println("nonHeapUsed = " + nonHeapUsed);

    /* 90 KB per connection */
    /*
    long expected = 90 * 1024 * MAX_CONNECTIONS;
    assertThat("heap used", heapUsed, lessThan(expected));
    */
}
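The measurement pattern in this test (GC, snapshot used bytes, run the workload, GC again, diff) is reusable on its own; note that System.gc() is only a hint to the JVM, so the delta is an estimate. A minimal sketch of the same pattern (the HeapDelta class and the retained list are illustrative):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.util.ArrayList;
import java.util.List;

public class HeapDelta {
    static final List<byte[]> retained = new ArrayList<>();

    static long heapDeltaOf(Runnable workload) {
        MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
        System.gc(); // only a hint; the estimate is best-effort
        long before = memoryMXBean.getHeapMemoryUsage().getUsed();
        workload.run();
        System.gc();
        return memoryMXBean.getHeapMemoryUsage().getUsed() - before;
    }

    public static void main(String[] args) {
        long delta = heapDeltaOf(() -> {
            for (int i = 0; i < 100; i++) {
                retained.add(new byte[1024]); // kept reachable, so it shows up in the delta
            }
        });
        System.out.println("approx. heap delta in bytes: " + delta);
    }
}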
From source file:org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java
/**
 * Stores a reference to the coprocessor environment provided by the
 * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost}
 * from the region where this coprocessor is loaded.
 * Since this is a coprocessor endpoint, it always expects to be loaded
 * on a table region, so it always expects this to be an instance of
 * {@link RegionCoprocessorEnvironment}.
 * @param env the environment provided by the coprocessor host
 * @throws IOException if the provided environment is not an instance of
 * {@code RegionCoprocessorEnvironment}
 */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
    if (env instanceof RegionCoprocessorEnvironment) {
        this.env = (RegionCoprocessorEnvironment) env;
    } else {
        throw new CoprocessorException("TrxRegionEndpoint coprocessor: start - Must be loaded on a table region!");
    }

    if (LOG.isTraceEnabled()) LOG.trace("TrxRegionEndpoint coprocessor: start");

    RegionCoprocessorEnvironment tmp_env = (RegionCoprocessorEnvironment) env;
    this.m_Region = tmp_env.getRegion();
    this.regionInfo = this.m_Region.getRegionInfo();
    this.t_Region = (TransactionalRegion) tmp_env.getRegion();
    this.fs = this.m_Region.getFilesystem();

    org.apache.hadoop.conf.Configuration conf = tmp_env.getConfiguration();

    synchronized (stoppableLock) {
        try {
            this.transactionLeaseTimeout = conf.getInt(LEASE_CONF, MINIMUM_LEASE_TIME);
            if (this.transactionLeaseTimeout < MINIMUM_LEASE_TIME) {
                if (LOG.isWarnEnabled())
                    LOG.warn("Transaction lease time: " + this.transactionLeaseTimeout
                            + ", was less than the minimum lease time. Now setting the timeout to the minimum default value: "
                            + MINIMUM_LEASE_TIME);
                this.transactionLeaseTimeout = MINIMUM_LEASE_TIME;
            }

            this.scannerLeaseTimeoutPeriod = HBaseConfiguration.getInt(conf,
                    HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
                    HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
                    HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
            this.scannerThreadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
            this.cleanTimer = conf.getInt(SLEEP_CONF, DEFAULT_SLEEP);
            this.memoryUsageThreshold = conf.getInt(MEMORY_THRESHOLD, DEFAULT_MEMORY_THRESHOLD);
            this.memoryUsagePerformGC = conf.getBoolean(MEMORY_PERFORM_GC, DEFAULT_MEMORY_PERFORM_GC);
            this.memoryUsageWarnOnly = conf.getBoolean(MEMORY_WARN_ONLY, DEFAULT_MEMORY_WARN_ONLY);
            this.memoryUsageTimer = conf.getInt(MEMORY_CONF, DEFAULT_MEMORY_SLEEP);
            this.suppressOutOfOrderProtocolException = conf.getBoolean(SUPPRESS_OOP, DEFAULT_SUPPRESS_OOP);

            if (this.transactionLeases == null)
                this.transactionLeases = new Leases(LEASE_CHECK_FREQUENCY);

            //if (this.scannerLeases == null)
            //    this.scannerLeases = new Leases(scannerThreadWakeFrequency);

            if (LOG.isTraceEnabled())
                LOG.trace("Transaction lease time: " + this.transactionLeaseTimeout
                        + " Scanner lease time: " + this.scannerThreadWakeFrequency
                        + ", Scanner lease timeout period: " + this.scannerLeaseTimeoutPeriod
                        + ", Clean timer: " + this.cleanTimer
                        + ", MemoryUsage timer: " + this.memoryUsageTimer
                        + ", MemoryUsageThreshold: " + this.memoryUsageThreshold
                        + ", MemoryUsagePerformGC: " + this.memoryUsagePerformGC
                        + ", MemoryUsageWarnOnly: " + this.memoryUsageWarnOnly
                        + ", Suppress OutOfOrderProtocolException: " + this.suppressOutOfOrderProtocolException);

            // Start the clean core thread
            this.cleanOldTransactionsThread = new CleanOldTransactionsChore(this, cleanTimer, stoppable);
            UncaughtExceptionHandler handler = new UncaughtExceptionHandler() {
                public void uncaughtException(final Thread t, final Throwable e) {
                    LOG.fatal("CleanOldTransactionChore uncaughtException: " + t.getName(), e);
                }
            };
            String n = Thread.currentThread().getName();
            ChoreThread = new Thread(this.cleanOldTransactionsThread);
            Threads.setDaemonThreadRunning(ChoreThread, n + ".oldTransactionCleaner", handler);

            // Start the memory usage chore thread if the threshold
            // selected is lower than the default of 100%.
            if (memoryUsageThreshold < DEFAULT_MEMORY_THRESHOLD && memoryUsageThread == null) {
                LOG.warn("TrxRegionEndpoint coprocessor: start - starting memoryUsageThread");
                memoryUsageThread = new MemoryUsageChore(this, memoryUsageTimer, stoppable2);
                UncaughtExceptionHandler handler2 = new UncaughtExceptionHandler() {
                    public void uncaughtException(final Thread t, final Throwable e) {
                        LOG.fatal("MemoryUsageChore uncaughtException: " + t.getName(), e);
                    }
                };
                String n2 = Thread.currentThread().getName();
                ChoreThread2 = new Thread(memoryUsageThread);
                Threads.setDaemonThreadRunning(ChoreThread2, n2 + ".memoryUsage", handler2);
            }

            if (TransactionalLeasesThread == null) {
                TransactionalLeasesThread = new Thread(this.transactionLeases);
                if (TransactionalLeasesThread != null) {
                    Threads.setDaemonThreadRunning(TransactionalLeasesThread, "Transactional leases");
                }
            }

            /*
            if (ScannerLeasesThread == null) {
                ScannerLeasesThread = new Thread(this.scannerLeases);
                if (ScannerLeasesThread != null) {
                    Threads.setDaemonThreadRunning(ScannerLeasesThread, "Scanner leases");
                }
            }
            */
        } catch (Exception e) {
            throw new CoprocessorException("TrxRegionEndpoint coprocessor: start - Caught exception " + e);
        }
    }

    this.t_Region = (TransactionalRegion) tmp_env.getRegion();
    this.fs = this.m_Region.getFilesystem();
    tHLog = this.m_Region.getLog();

    RegionServerServices rss = tmp_env.getRegionServerServices();
    ServerName sn = rss.getServerName();
    lv_hostName = sn.getHostname();
    lv_port = sn.getPort();
    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: hostname " + lv_hostName + " port " + lv_port);

    this.regionInfo = this.m_Region.getRegionInfo();
    this.nextLogSequenceId = this.m_Region.getSequenceId();
    this.t_Region = (TransactionalRegion) tmp_env.getRegion();
    zkw1 = rss.getZooKeeper();

    this.configuredEarlyLogging = tmp_env.getConfiguration()
            .getBoolean("hbase.regionserver.region.transactional.earlylogging", false);
    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: early logging setting is " + this.configuredEarlyLogging
                + "\nTrxRegionEndpoint coprocessor: get the reference from Region CoprocessorEnvironment ");

    this.configuredConflictReinstate = tmp_env.getConfiguration()
            .getBoolean("hbase.regionserver.region.transactional.conflictreinstate", false);
    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: conflict reinstate setting is " + this.configuredConflictReinstate
                + "\nTrxRegionEndpoint coprocessor: get the reference from Region CoprocessorEnvironment ");

    if (tmp_env.getSharedData().isEmpty()) {
        if (LOG.isTraceEnabled()) LOG.trace("TrxRegionEndpoint coprocessor: shared map is empty ");
    } else {
        if (LOG.isTraceEnabled()) LOG.trace("TrxRegionEndpoint coprocessor: shared map is NOT empty");
    }

    transactionsEPCPMap.put(this.m_Region.getRegionNameAsString() + trxkeyEPCPinstance, this);

    transactionsByIdTestz = TrxRegionObserver.getRefMap();
    if (transactionsByIdTestz.isEmpty()) {
        if (LOG.isTraceEnabled()) LOG.trace("TrxRegionEndpoint coprocessor: reference map is empty ");
    } else {
        if (LOG.isTraceEnabled()) LOG.trace("TrxRegionEndpoint coprocessor: reference map is NOT empty ");
    }

    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: Region " + this.m_Region.getRegionNameAsString()
                + " check indoubt list from reference map ");

    Map<Long, List<WALEdit>> indoubtTransactionsByIdCheck = (TreeMap<Long, List<WALEdit>>) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeypendingTransactionsById);
    if (indoubtTransactionsByIdCheck != null) {
        this.indoubtTransactionsById = indoubtTransactionsByIdCheck;
    } else {
        transactionsByIdTestz.put(
                this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeypendingTransactionsById,
                this.indoubtTransactionsById);
    }

    Map<Integer, Integer> indoubtTransactionsCountByTmidCheck = (TreeMap<Integer, Integer>) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeyindoubtTransactionsCountByTmid);
    if (indoubtTransactionsCountByTmidCheck != null) {
        this.indoubtTransactionsCountByTmid = indoubtTransactionsCountByTmidCheck;
    } else {
        transactionsByIdTestz.put(
                this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeyindoubtTransactionsCountByTmid,
                this.indoubtTransactionsCountByTmid);
    }

    Set<TrxTransactionState> commitPendingTransactionsCheck = (Set<TrxTransactionState>) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeycommitPendingTransactions);
    if (commitPendingTransactionsCheck != null) {
        this.commitPendingTransactions = commitPendingTransactionsCheck;
    } else {
        transactionsByIdTestz.put(
                this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeycommitPendingTransactions,
                this.commitPendingTransactions);
    }

    ConcurrentHashMap<String, TrxTransactionState> transactionsByIdCheck = (ConcurrentHashMap<String, TrxTransactionState>) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeytransactionsById);
    if (transactionsByIdCheck != null) {
        this.transactionsById = transactionsByIdCheck;
    } else {
        transactionsByIdTestz.put(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeytransactionsById,
                this.transactionsById);
    }

    AtomicBoolean closingCheck = (AtomicBoolean) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeyClosingVar);
    if (closingCheck != null) {
        this.closing = closingCheck;
    } else {
        transactionsByIdTestz.put(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeyClosingVar,
                this.closing);
    }

    // Set up the memoryBean from the ManagementFactory
    if (memoryUsageThreshold < DEFAULT_MEMORY_THRESHOLD)
        memoryBean = ManagementFactory.getMemoryMXBean();

    if (LOG.isTraceEnabled()) LOG.trace("TrxRegionEndpoint coprocessor: start");
}
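The MemoryUsageChore itself is not shown here, but the threshold test it implies (presumably comparing used heap against memoryUsageThreshold via the memoryBean set up at the end) is a one-liner against the same bean, guarding the -1 case. A minimal sketch of such a check (all names here are illustrative, not from the Trafodion source):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;

public class MemoryThresholdCheck {
    /** Returns true when heap usage exceeds thresholdPercent of the maximum. */
    static boolean aboveThreshold(int thresholdPercent) {
        MemoryUsage heap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        long max = heap.getMax();
        if (max <= 0) {
            return false; // max undefined (-1): nothing to compare against
        }
        return (heap.getUsed() * 100) / max > thresholdPercent;
    }

    public static void main(String[] args) {
        System.out.println("above 90%: " + aboveThreshold(90));
    }
}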
From source file:org.apache.hadoop.hbase.io.hfile.CacheConfig.java
/**
 * Returns the block cache or <code>null</code> in case none should be used.
 *
 * @param conf The current configuration.
 * @return The block cache or <code>null</code>.
 */
public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
    if (GLOBAL_BLOCK_CACHE_INSTANCE != null)
        return GLOBAL_BLOCK_CACHE_INSTANCE;
    if (blockCacheDisabled)
        return null;

    float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
            HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
    if (cachePercentage == 0L) {
        blockCacheDisabled = true;
        return null;
    }
    if (cachePercentage > 1.0) {
        throw new IllegalArgumentException(
                HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + " must be between 0.0 and 1.0, and not > 1.0");
    }

    // Calculate the amount of heap to give the block cache.
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long lruCacheSize = (long) (mu.getMax() * cachePercentage);
    int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HConstants.DEFAULT_BLOCKSIZE);
    long slabCacheOffHeapCacheSize = (long) (conf.getFloat(SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, (float) 0)
            * DirectMemoryUtils.getDirectMemorySize());

    if (slabCacheOffHeapCacheSize <= 0) {
        String bucketCacheIOEngineName = conf.get(BUCKET_CACHE_IOENGINE_KEY, null);
        float bucketCachePercentage = conf.getFloat(BUCKET_CACHE_SIZE_KEY, 0F);
        // A percentage of max heap size or an absolute value in megabytes
        long bucketCacheSize = (long) (bucketCachePercentage < 1 ? mu.getMax() * bucketCachePercentage
                : bucketCachePercentage * 1024 * 1024);

        boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY, DEFAULT_BUCKET_CACHE_COMBINED);
        BucketCache bucketCache = null;
        if (bucketCacheIOEngineName != null && bucketCacheSize > 0) {
            int writerThreads = conf.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, DEFAULT_BUCKET_CACHE_WRITER_THREADS);
            int writerQueueLen = conf.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, DEFAULT_BUCKET_CACHE_WRITER_QUEUE);
            String persistentPath = conf.get(BUCKET_CACHE_PERSISTENT_PATH_KEY);
            float combinedPercentage = conf.getFloat(BUCKET_CACHE_COMBINED_PERCENTAGE_KEY,
                    DEFAULT_BUCKET_CACHE_COMBINED_PERCENTAGE);
            if (combinedWithLru) {
                lruCacheSize = (long) ((1 - combinedPercentage) * bucketCacheSize);
                bucketCacheSize = (long) (combinedPercentage * bucketCacheSize);
            }
            try {
                int ioErrorsTolerationDuration = conf.getInt(
                        "hbase.bucketcache.ioengine.errors.tolerated.duration",
                        BucketCache.DEFAULT_ERROR_TOLERATION_DURATION);
                bucketCache = new BucketCache(bucketCacheIOEngineName, bucketCacheSize, blockSize, writerThreads,
                        writerQueueLen, persistentPath, ioErrorsTolerationDuration);
            } catch (IOException ioex) {
                LOG.error("Can't instantiate bucket cache", ioex);
                throw new RuntimeException(ioex);
            }
        }
        LOG.info("Allocating LruBlockCache size=" + StringUtils.byteDesc(lruCacheSize) + ", blockSize="
                + StringUtils.byteDesc(blockSize));
        LruBlockCache lruCache = new LruBlockCache(lruCacheSize, blockSize);
        lruCache.setVictimCache(bucketCache);
        if (bucketCache != null && combinedWithLru) {
            GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(lruCache, bucketCache);
        } else {
            GLOBAL_BLOCK_CACHE_INSTANCE = lruCache;
        }
    } else {
        GLOBAL_BLOCK_CACHE_INSTANCE = new DoubleBlockCache(lruCacheSize, slabCacheOffHeapCacheSize, blockSize,
                blockSize, conf);
    }
    return GLOBAL_BLOCK_CACHE_INSTANCE;
}
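The sizing idiom this method is built on, taking a configured fraction of the maximum heap reported by the MemoryMXBean, can be isolated as a small helper. A sketch with illustrative names (CacheSizing, cacheSizeBytes):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;

public class CacheSizing {
    /** Size a cache as a fraction of the JVM's max heap, e.g. 0.25f for 25%. */
    static long cacheSizeBytes(float fractionOfHeap) {
        if (fractionOfHeap <= 0f || fractionOfHeap > 1.0f) {
            throw new IllegalArgumentException("fraction must be in (0, 1], got " + fractionOfHeap);
        }
        MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        return (long) (mu.getMax() * fractionOfHeap);
    }

    public static void main(String[] args) {
        System.out.println("25% of max heap = " + cacheSizeBytes(0.25f) + " bytes");
    }
}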
From source file:org.apache.hadoop.hbase.io.hfile.TestCacheConfig.java
private void doBucketCacheConfigTest() {
    final int bcSize = 100;
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
    CacheConfig cc = new CacheConfig(this.conf);
    basicBlockCacheOps(cc, false, false);
    assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
    // TODO: Assert sizes allocated are right and proportions.
    CombinedBlockCache cbc = (CombinedBlockCache) cc.getBlockCache();
    BlockCache[] bcs = cbc.getBlockCaches();
    assertTrue(bcs[0] instanceof LruBlockCache);
    LruBlockCache lbc = (LruBlockCache) bcs[0];
    assertEquals(CacheConfig.getLruCacheSize(this.conf, ManagementFactory.getMemoryMXBean().getHeapMemoryUsage()),
            lbc.getMaxSize());
    assertTrue(bcs[1] instanceof BucketCache);
    BucketCache bc = (BucketCache) bcs[1];
    // getMaxSize comes back in bytes but we specified size in MB
    assertEquals(bcSize, bc.getMaxSize() / (1024 * 1024));
}
From source file:org.apache.hadoop.hbase.io.hfile.TestCacheConfig.java
/**
 * Assert that when BUCKET_CACHE_COMBINED_KEY is false (the non-default), we deploy
 * LruBlockCache as L1 with a BucketCache for L2.
 */
@Test(timeout = 10000)
public void testBucketCacheConfigL1L2Setup() {
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    // Make sure the LRU size is smaller than bcSize, so that when eviction from L1
    // happens, it does not fail because L2 can't take the evicted block (block too big).
    this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f);
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long lruExpectedSize = CacheConfig.getLruCacheSize(this.conf, mu);
    final int bcSize = 100;
    long bcExpectedSize = 100 * 1024 * 1024; // MB.
    assertTrue(lruExpectedSize < bcExpectedSize);
    this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
    this.conf.setBoolean(CacheConfig.BUCKET_CACHE_COMBINED_KEY, false);
    CacheConfig cc = new CacheConfig(this.conf);
    basicBlockCacheOps(cc, false, false);
    assertTrue(cc.getBlockCache() instanceof LruBlockCache);
    // TODO: Assert sizes allocated are right and proportions.
    LruBlockCache lbc = (LruBlockCache) cc.getBlockCache();
    assertEquals(lruExpectedSize, lbc.getMaxSize());
    BlockCache bc = lbc.getVictimHandler();
    // getMaxSize comes back in bytes but we specified size in MB
    assertEquals(bcExpectedSize, ((BucketCache) bc).getMaxSize());

    // Test the L1+L2 deploy works as we'd expect with blocks evicted from L1 going to L2.
    long initialL1BlockCount = lbc.getBlockCount();
    long initialL2BlockCount = bc.getBlockCount();
    Cacheable c = new DataCacheEntry();
    BlockCacheKey bck = new BlockCacheKey("bck", 0);
    lbc.cacheBlock(bck, c, false, false);
    assertEquals(initialL1BlockCount + 1, lbc.getBlockCount());
    assertEquals(initialL2BlockCount, bc.getBlockCount());

    // Force evictions by putting in a block too big.
    final long justTooBigSize = lbc.acceptableSize() + 1;
    lbc.cacheBlock(new BlockCacheKey("bck2", 0), new DataCacheEntry() {
        @Override
        public long heapSize() {
            return justTooBigSize;
        }

        @Override
        public int getSerializedLength() {
            return (int) heapSize();
        }
    });

    // The eviction thread in LruBlockCache needs to run.
    while (initialL1BlockCount != lbc.getBlockCount())
        Threads.sleep(10);
    assertEquals(initialL1BlockCount, lbc.getBlockCount());
    long count = bc.getBlockCount();
    assertTrue(initialL2BlockCount + 1 <= count);
}
From source file:org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil.java
/**
 * @param conf the current configuration
 * @return The on-heap size for the L2 block cache.
 */
public static float getL2BlockCacheHeapPercent(Configuration conf) {
    float l2CachePercent = 0.0F;
    String bucketCacheIOEngineName = conf.get(HConstants.BUCKET_CACHE_IOENGINE_KEY, null);
    // L2 block cache can be on heap when IOEngine is "heap"
    if (bucketCacheIOEngineName != null && bucketCacheIOEngineName.startsWith("heap")) {
        float bucketCachePercentage = conf.getFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F);
        MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        l2CachePercent = bucketCachePercentage < 1 ? bucketCachePercentage
                : (bucketCachePercentage * 1024 * 1024) / mu.getMax();
    }
    return l2CachePercent;
}
From source file:org.apache.hadoop.hbase.io.util.MemorySizeUtil.java
/**
 * Return JVM memory statistics while properly handling runtime exceptions from the JVM.
 * @return a memory usage object, or null if there was a runtime exception. (N.B. you
 * could also get -1 values back from the JVM.)
 * @see MemoryUsage
 */
public static MemoryUsage safeGetHeapMemoryUsage() {
    MemoryUsage usage = null;
    try {
        usage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    } catch (RuntimeException exception) {
        LOG.warn(JVM_HEAP_EXCEPTION, exception);
    }
    return usage;
}
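A caller of this wrapper still has to handle both failure modes its javadoc names: a null return and -1 fields. A hedged usage sketch, assuming the HBase class above is on the classpath (the demo class name is illustrative):

import java.lang.management.MemoryUsage;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;

public class SafeHeapUsageDemo {
    public static void main(String[] args) {
        MemoryUsage usage = MemorySizeUtil.safeGetHeapMemoryUsage();
        if (usage == null) {
            // the JVM threw a RuntimeException while reading memory stats
            System.out.println("heap usage unavailable");
        } else if (usage.getMax() < 0) {
            System.out.println("heap used=" + usage.getUsed() + " (max undefined)");
        } else {
            System.out.println("heap used=" + usage.getUsed() + " max=" + usage.getMax());
        }
    }
}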