List of usage examples for java.lang.Runtime.maxMemory()
public native long maxMemory();
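maxMemory() returns the maximum amount of memory, in bytes, that the Java virtual machine will attempt to use (Long.MAX_VALUE if there is no inherent limit). Before the project examples below, here is a minimal, self-contained sketch of the usual bookkeeping that combines it with totalMemory() and freeMemory() to estimate heap headroom; the class and variable names are illustrative only and not taken from any of the projects listed:

/** Illustrative helper (not from the projects below): estimates heap headroom. */
public class HeapHeadroom {
    public static void main(String[] args) {
        Runtime runtime = Runtime.getRuntime();
        long max = runtime.maxMemory();         // upper bound the JVM will attempt to use (roughly -Xmx)
        long allocated = runtime.totalMemory(); // heap currently reserved from the OS
        long free = runtime.freeMemory();       // unused portion of the reserved heap
        long used = allocated - free;           // bytes actually in use
        long headroom = max - used;             // bytes still available before hitting the limit
        System.out.printf("max=%dMB used=%dMB headroom=%dMB%n",
                max / 1048576, used / 1048576, headroom / 1048576);
    }
}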
From source file:com.viettel.logistic.wms.business.KpiLogBusiness.java
private void checkMemory(String functionName, String transactionCode) {
    //Getting the runtime reference from system
    Runtime runtime = Runtime.getRuntime();
    int mb = 1024 * 1024;

    System.out.println("##### Heap utilization statistics [MB] #####");
    System.out.println("##### Transaction code = " + transactionCode + " #####");

    //Print used memory
    System.out.println(functionName + " Used Memory:" + (runtime.totalMemory() - runtime.freeMemory()) / mb);

    //Print free memory
    System.out.println(functionName + " Free Memory:" + runtime.freeMemory() / mb);

    //Print total available memory
    System.out.println(functionName + " Total Memory:" + runtime.totalMemory() / mb);

    //Print Maximum available memory
    System.out.println(functionName + " Max Memory:" + runtime.maxMemory() / mb);
}
From source file:org.fcrepo.server.security.xacml.pdp.data.FedoraPolicyStore.java
@Override
public void init() throws PolicyStoreException, FileNotFoundException {
    if (log.isDebugEnabled()) {
        Runtime runtime = Runtime.getRuntime();
        log.debug("Total memory: " + runtime.totalMemory() / 1024);
        log.debug("Free memory: " + runtime.freeMemory() / 1024);
        log.debug("Max memory: " + runtime.maxMemory() / 1024);
    }

    super.init();

    // if no pid namespace was specified, use the default specified in fedora.fcfg
    if (pidNamespace.equals("")) {
        pidNamespace = fedoraServer.getModule("org.fcrepo.server.storage.DOManager")
                .getParameter("pidNamespace");
    }

    // check control group was supplied
    if (datastreamControlGroup.equals("")) {
        throw new PolicyStoreException(
                "No control group for policy datastreams was specified in FedoraPolicyStore configuration");
    }

    if (validateSchema) {
        String schemaLocation = schemaLocations.get(XACML20_POLICY_NS);
        if (schemaLocation == null) {
            throw new PolicyStoreException("Configuration error - no policy schema specified");
        }
        try {
            String serverHome = fedoraServer.getHomeDir().getCanonicalPath() + File.separator;
            String schemaPath = ((schemaLocation).startsWith(File.separator) ? "" : serverHome) + schemaLocation;
            FileInputStream in = new FileInputStream(schemaPath);
            PolicyParser policyParser = new PolicyParser(in);
            ValidationUtility.setFeslPolicyParser(policyParser);
        } catch (IOException ioe) {
            throw new PolicyStoreException(ioe.getMessage(), ioe);
        } catch (SAXException se) {
            throw new PolicyStoreException(se.getMessage(), se);
        }
    }
}
From source file:at.beris.virtualfile.shell.Shell.java
private void displayStatistics() {
    Runtime runtime = Runtime.getRuntime();
    System.out.println("** Heap utilization statistics [KB] **");
    System.out.println(String.format("Used Memory: %,d", (runtime.totalMemory() - runtime.freeMemory()) / 1024));
    System.out.println(String.format("Free Memory: %,d", runtime.freeMemory() / 1024));
    System.out.println(String.format("Total Memory: %,d", runtime.totalMemory() / 1024));
    System.out.println(String.format("Max Memory: %,d", runtime.maxMemory() / 1024));
}
From source file:org.janusgraph.diskstorage.Backend.java
/**
 * Initializes this backend with the given configuration. Must be called before this Backend can be used
 *
 * @param config
 */
public void initialize(Configuration config) {
    try {
        //EdgeStore & VertexIndexStore
        KeyColumnValueStore idStore = storeManager.openDatabase(ID_STORE_NAME);

        idAuthority = null;
        if (storeFeatures.isKeyConsistent()) {
            idAuthority = new ConsistentKeyIDAuthority(idStore, storeManager, config);
        } else {
            throw new IllegalStateException(
                    "Store needs to support consistent key or transactional operations for ID manager to guarantee proper id allocations");
        }

        KeyColumnValueStore edgeStoreRaw = storeManagerLocking.openDatabase(EDGESTORE_NAME);
        KeyColumnValueStore indexStoreRaw = storeManagerLocking.openDatabase(INDEXSTORE_NAME);

        //Configure caches
        if (cacheEnabled) {
            long expirationTime = configuration.get(DB_CACHE_TIME);
            Preconditions.checkArgument(expirationTime >= 0, "Invalid cache expiration time: %s", expirationTime);
            if (expirationTime == 0)
                expirationTime = ETERNAL_CACHE_EXPIRATION;

            long cacheSizeBytes;
            double cachesize = configuration.get(DB_CACHE_SIZE);
            Preconditions.checkArgument(cachesize > 0.0, "Invalid cache size specified: %s", cachesize);
            if (cachesize < 1.0) { //Its a percentage
                Runtime runtime = Runtime.getRuntime();
                cacheSizeBytes = (long) ((runtime.maxMemory() - (runtime.totalMemory() - runtime.freeMemory())) * cachesize);
            } else {
                Preconditions.checkArgument(cachesize > 1000, "Cache size is too small: %s", cachesize);
                cacheSizeBytes = (long) cachesize;
            }
            log.info("Configuring total store cache size: {}", cacheSizeBytes);

            long cleanWaitTime = configuration.get(DB_CACHE_CLEAN_WAIT);
            Preconditions.checkArgument(EDGESTORE_CACHE_PERCENT + INDEXSTORE_CACHE_PERCENT == 1.0,
                    "Cache percentages don't add up!");
            long edgeStoreCacheSize = Math.round(cacheSizeBytes * EDGESTORE_CACHE_PERCENT);
            long indexStoreCacheSize = Math.round(cacheSizeBytes * INDEXSTORE_CACHE_PERCENT);

            edgeStore = new ExpirationKCVSCache(edgeStoreRaw, getMetricsCacheName(EDGESTORE_NAME),
                    expirationTime, cleanWaitTime, edgeStoreCacheSize);
            indexStore = new ExpirationKCVSCache(indexStoreRaw, getMetricsCacheName(INDEXSTORE_NAME),
                    expirationTime, cleanWaitTime, indexStoreCacheSize);
        } else {
            edgeStore = new NoKCVSCache(edgeStoreRaw);
            indexStore = new NoKCVSCache(indexStoreRaw);
        }

        //Just open them so that they are cached
        txLogManager.openLog(SYSTEM_TX_LOG_NAME);
        mgmtLogManager.openLog(SYSTEM_MGMT_LOG_NAME);
        txLogStore = new NoKCVSCache(storeManager.openDatabase(SYSTEM_TX_LOG_NAME));

        //Open global configuration
        KeyColumnValueStore systemConfigStore = storeManagerLocking.openDatabase(SYSTEM_PROPERTIES_STORE_NAME);
        systemConfig = getGlobalConfiguration(new BackendOperation.TransactionalProvider() {
            @Override
            public StoreTransaction openTx() throws BackendException {
                return storeManagerLocking.beginTransaction(StandardBaseTransactionConfig
                        .of(configuration.get(TIMESTAMP_PROVIDER), storeFeatures.getKeyConsistentTxConfig()));
            }

            @Override
            public void close() throws BackendException {
                //Do nothing, storeManager is closed explicitly by Backend
            }
        }, systemConfigStore, configuration);

        userConfig = getConfiguration(new BackendOperation.TransactionalProvider() {
            @Override
            public StoreTransaction openTx() throws BackendException {
                return storeManagerLocking.beginTransaction(
                        StandardBaseTransactionConfig.of(configuration.get(TIMESTAMP_PROVIDER)));
            }

            @Override
            public void close() throws BackendException {
                //Do nothing, storeManager is closed explicitly by Backend
            }
        }, systemConfigStore, USER_CONFIGURATION_IDENTIFIER, configuration);
    } catch (BackendException e) {
        throw new JanusGraphException("Could not initialize backend", e);
    }
}
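The JanusGraph example above sizes its store cache as a fraction of the heap that is still available, i.e. maxMemory() minus the memory already in use. Here is a minimal sketch of that calculation in isolation, assuming a fractional cache-size setting strictly between 0 and 1; the class and method names are illustrative and not part of the JanusGraph API:

/** Illustrative sketch of fraction-of-available-heap cache sizing (not JanusGraph API). */
public class CacheSizing {
    static long cacheSizeBytes(double cacheFraction) {
        if (cacheFraction <= 0.0 || cacheFraction >= 1.0) {
            throw new IllegalArgumentException("Expected a fraction in (0, 1): " + cacheFraction);
        }
        Runtime runtime = Runtime.getRuntime();
        long used = runtime.totalMemory() - runtime.freeMemory(); // heap currently in use
        long available = runtime.maxMemory() - used;              // room left before the -Xmx limit
        return (long) (available * cacheFraction);                // cache gets a share of what is left
    }

    public static void main(String[] args) {
        System.out.println("Cache budget: " + cacheSizeBytes(0.3) / 1048576 + " MB");
    }
}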
From source file:com.ettrema.zsync.IntegrationTests.java
/**
 * Constructs an UploadMaker/UploadMakerEx, saves the Upload stream to a new File with
 * name uploadFileName, and returns that File.
 *
 * @param localFile The local file to be uploaded
 * @param zsFile The zsync of the server file
 * @param uploadFileName The name of the File in which to save the upload stream
 * @return
 * @throws IOException
 */
private File makeAndSaveUpload(File localFile, File zsFile, String uploadFileName) throws IOException {
    System.out.println("------------- makeAndSaveUpload --------------------");
    System.gc();
    Runtime rt = Runtime.getRuntime();

    UploadMaker umx = new UploadMaker(localFile, zsFile);
    InputStream uploadIn = umx.makeUpload();

    File uploadFile = new File(uploadFileName);
    if (uploadFile.exists()) {
        if (!uploadFile.delete()) {
            throw new RuntimeException("Couldnt delete: " + uploadFile.getAbsolutePath());
        }
    }
    FileOutputStream uploadOut = new FileOutputStream(uploadFile);

    System.gc();
    System.out.println("Memory stats: " + formatBytes(rt.maxMemory()) + " - " + formatBytes(rt.totalMemory())
            + " - " + formatBytes(rt.freeMemory()));
    long endUsed = (rt.totalMemory() - rt.freeMemory());
    System.out.println("Start used memory: " + formatBytes(startUsed) + " end used memory: "
            + formatBytes(endUsed) + " - delta: " + formatBytes(endUsed - startUsed));
    System.out.println("");

    IOUtils.copy(uploadIn, uploadOut);
    uploadIn.close();
    uploadOut.close();

    System.out.println("Created upload of size: " + formatBytes(uploadFile.length()) + " from local file: "
            + formatBytes(localFile.length()));
    return uploadFile;
}
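The test above snapshots used memory (totalMemory() minus freeMemory()) before and after doing the work, then reports the delta alongside maxMemory(). A minimal, self-contained sketch of that before/after pattern, with the measured workload replaced by a placeholder allocation:

/** Illustrative before/after memory measurement (placeholder workload, not from the zsync tests). */
public class MemoryDelta {
    public static void main(String[] args) {
        Runtime rt = Runtime.getRuntime();
        System.gc();                                  // best effort: encourage a collection before measuring
        long startUsed = rt.totalMemory() - rt.freeMemory();

        byte[] work = new byte[16 * 1024 * 1024];     // placeholder for the real workload

        long endUsed = rt.totalMemory() - rt.freeMemory();
        System.out.printf("allocated %d bytes: used %d -> %d of max %d (delta %d)%n",
                work.length, startUsed, endUsed, rt.maxMemory(), endUsed - startUsed);
    }
}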
From source file:edu.internet2.middleware.shibboleth.idp.StatusServlet.java
/**
 * Prints out information about the operating environment. This includes the operating system name, version and
 * architecture, the JDK version, available CPU cores, memory currently used by the JVM process, the maximum amount
 * of memory that may be used by the JVM, and the current time in UTC.
 *
 * @param out output writer to which information will be written
 */
protected void printOperatingEnvironmentInformation(PrintWriter out) {
    Runtime runtime = Runtime.getRuntime();
    DateTime now = new DateTime(ISOChronology.getInstanceUTC());

    out.println("### Operating Environment Information");
    out.println("operating_system: " + System.getProperty("os.name"));
    out.println("operating_system_version: " + System.getProperty("os.version"));
    out.println("operating_system_architecture: " + System.getProperty("os.arch"));
    out.println("jdk_version: " + System.getProperty("java.version"));
    out.println("available_cores: " + runtime.availableProcessors());
    out.println("used_memory: " + runtime.totalMemory() / 1048576 + "MB");
    out.println("maximum_memory: " + runtime.maxMemory() / 1048576 + "MB");
    out.println("start_time: " + startTime.toString(dateFormat));
    out.println("current_time: " + now.toString(dateFormat));
    out.println("uptime: " + (now.getMillis() - startTime.getMillis()) + "ms");
}
From source file:org.apache.hadoop.hdfs.server.namenode.Namenode2AgentServiceImpl.java
@Override
public Map getNamenodeInfo() {
    Map map = new HashMap<>();

    NameNode namenode = Namenode2Agent.namenode;
    Configuration configuration = Namenode2Agent.configuration;

    map.put("hostName", namenode.getAddress(configuration).getHostName());
    map.put("port", namenode.getAddress(configuration).getPort());

    // Block
    map.put("blocksTotal", namenode.getNamesystem().getBlocksTotal());
    map.put("corruptReplicatedBlocks", namenode.getNamesystem().getCorruptReplicaBlocks());
    map.put("pendingReplicationBlocks", namenode.getNamesystem().getPendingReplicationBlocks());
    map.put("scheduledReplicationBlocks", namenode.getNamesystem().getScheduledReplicationBlocks());
    map.put("underReplicatedBlocks", namenode.getNamesystem().getUnderReplicatedBlocks());
    map.put("missingBlocks", namenode.getNamesystem().getNumberOfMissingBlocks());
    map.put("blockCapacity", namenode.getNamesystem().getBlockCapacity());

    // Node Status
    map.put("all", namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.ALL));
    map.put("dead", namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.DEAD));
    map.put("live", namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.LIVE));
    map.put("decommissioning",
            namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.DECOMMISSIONING));
    map.put("stale", namenode.getNamesystem().getNumStaleDataNodes());

    // FSNamesystem
    //map.put("defaultBlockSize", namenode.getNamesystem().getDefaultBlockSize());
    map.put("defaultBlockSize", configuration.get("dfs.blocksize"));
    map.put("totalFiles", namenode.getNamesystem().getTotalFiles());
    map.put("totalBlocks", namenode.getNamesystem().getTotalBlocks());
    map.put("totalLoad", namenode.getNamesystem().getTotalLoad());

    // DFS Capacity
    map.put("capacityRemaining", namenode.getNamesystem().getCapacityRemainingGB());
    map.put("capacityRemainingPercent", Math.round(100 / namenode.getNamesystem().getCapacityTotal()
            * namenode.getNamesystem().getCapacityRemaining()));
    map.put("capacityTotal", namenode.getNamesystem().getCapacityTotalGB());
    map.put("capacityUsed", namenode.getNamesystem().getCapacityUsedGB());
    map.put("capacityUsedNonDFS", namenode.getNamesystem().getCapacityUsedNonDFS());
    map.put("capacityUsedPercent", Math.round(100 / namenode.getNamesystem().getCapacityTotal()
            * namenode.getNamesystem().getCapacityUsedNonDFS()));

    // DFS Usage
    map.put("free", namenode.getNamesystem().getFree());
    map.put("used", namenode.getNamesystem().getUsed());
    map.put("total", namenode.getNamesystem().getTotal());
    map.put("threads", namenode.getNamesystem().getThreads());
    map.put("startTime", namenode.getNamesystem().getStartTime());

    // JVM Heap Size
    final Runtime rt = Runtime.getRuntime();
    final long totalMemory = rt.totalMemory() / MEGA_BYTES;
    final long freeMemory = rt.freeMemory() / MEGA_BYTES;

    map.put("jvmMaxMemory", rt.maxMemory() / MEGA_BYTES);
    map.put("jvmTotalMemory", rt.totalMemory() / MEGA_BYTES);
    map.put("jvmFreeMemory", rt.freeMemory() / MEGA_BYTES);
    map.put("jvmUsedMemory", totalMemory - freeMemory);

    return map;
}
From source file:org.LexGrid.LexBIG.distributed.test.testUtility.PerformanceTest.java
public void printMemoryStatistics() {
    int mb = 1024 * 1024;

    //Getting the runtime reference from system
    Runtime runtime = Runtime.getRuntime();

    System.out.println("##### Heap utilization statistics [MB] #####");

    //Print used memory
    System.out.println("Used Memory:" + (runtime.totalMemory() - runtime.freeMemory()) / mb);

    //Print free memory
    System.out.println("Free Memory:" + runtime.freeMemory() / mb);

    //Print total available memory
    System.out.println("Total Memory:" + runtime.totalMemory() / mb);

    //Print Maximum available memory
    System.out.println("Max Memory:" + runtime.maxMemory() / mb);
}
From source file:edu.upenn.ircs.lignos.morsel.MorphLearner.java
private String memoryStatus() {
    // Check the memory
    Runtime runtime = Runtime.getRuntime();
    long usage = runtime.totalMemory() - runtime.freeMemory();
    long remaining = runtime.maxMemory() - usage;

    // Convert to megabytes
    usage /= 1048576L;
    remaining /= 1048576L;

    return "Memory status: " + usage + "MB Used, " + remaining + "MB Remaining";
}
From source file:org.mrgeo.resources.wms.WmsGenerator.java
private Response handleRequest(@Context UriInfo uriInfo) {
    long start = System.currentTimeMillis();

    try {
        MultivaluedMap<String, String> allParams = uriInfo.getQueryParameters();
        String request = getQueryParam(allParams, "request", "GetCapabilities");
        ProviderProperties providerProperties = SecurityUtils.getProviderProperties();

        String serviceName = getQueryParam(allParams, "service");
        if (serviceName == null) {
            return writeError(Response.Status.BAD_REQUEST,
                    "Missing required SERVICE parameter. Should be set to \"WMS\"");
        }
        if (!serviceName.equalsIgnoreCase("wms")) {
            return writeError(Response.Status.BAD_REQUEST, "Invalid SERVICE parameter. Should be set to \"WMS\"");
        }

        if (request.equalsIgnoreCase("getmap")) {
            return getMap(allParams, providerProperties);
        } else if (request.equalsIgnoreCase("getmosaic")) {
            return getMosaic(allParams, providerProperties);
        } else if (request.equalsIgnoreCase("gettile")) {
            return getTile(allParams, providerProperties);
        } else if (request.equalsIgnoreCase("getcapabilities")) {
            return getCapabilities(uriInfo, allParams, providerProperties);
        } else if (request.equalsIgnoreCase("describetiles")) {
            return describeTiles(uriInfo, allParams, providerProperties);
        }
        return writeError(Response.Status.BAD_REQUEST, "Invalid request");
    } finally {
        if (log.isDebugEnabled()) {
            log.debug("WMS request time: {}ms", (System.currentTimeMillis() - start));
            // this can be resource intensive.
            System.gc();
            final Runtime rt = Runtime.getRuntime();
            log.debug(String.format("WMS request memory: %.1fMB / %.1fMB\n",
                    (rt.totalMemory() - rt.freeMemory()) / 1e6, rt.maxMemory() / 1e6));
        }
    }
}