Usage examples for `java.lang.management.ManagementFactory.getRuntimeMXBean()`:

    public static RuntimeMXBean getRuntimeMXBean()
From source file:org.slc.sli.ingestion.processors.JobReportingProcessor.java
private void writeSecurityLog(NewBatchJob job, LogLevelType messageType, String message) { byte[] ipAddr = null; try {//from w ww.j av a 2 s . c om InetAddress addr = InetAddress.getLocalHost(); // Get IP Address ipAddr = addr.getAddress(); } catch (UnknownHostException e) { LOG.error("Error getting local host", e); } String edOrg = tenantDA.getTenantEdOrg(job.getTopLevelSourceId()); if (edOrg == null) { edOrg = ""; } List<String> userRoles = Collections.emptyList(); SecurityEvent event = new SecurityEvent(); event.setTenantId(""); // Alpha MH (tenantId - written in 'message') event.setUser(""); event.setUserEdOrg(edOrg); event.setTargetEdOrgList(edOrg); //@TA10431 - change targetEdOrg from scalar to list event.setActionUri("writeLine"); event.setAppId("Ingestion"); event.setOrigin(""); if (ipAddr != null) { event.setExecutedOn(ipAddr[0] + "." + ipAddr[1] + "." + ipAddr[2] + "." + ipAddr[3]); } event.setCredential(""); event.setUserOrigin(""); event.setTimeStamp(new Date()); event.setProcessNameOrId(ManagementFactory.getRuntimeMXBean().getName()); event.setClassName(this.getClass().getName()); event.setLogLevel(messageType); event.setRoles(userRoles); event.setLogMessage(message); audit(event); }
From source file:com.sos.VirtualFileSystem.DataElements.SOSFileListEntry.java
/**
 * Serializes this file-transfer entry into one CSV record by appending each
 * field group via addCSv(...) and returns the accumulated record string.
 *
 * @return the CSV record built up in {@code strCSVRec}
 */
public String toCsv() {
    // guid, mandator, current timestamp
    addCSv(guid, objOptions.mandator.Value(), SOSOptionTime.getCurrentTimeAsString());
    /*
     * This hack is tested for SUN-JVM only. No guarantee is made for other JVMs:
     * RuntimeMXBean.getName() conventionally returns "pid@hostname" on HotSpot,
     * so the leading part is taken as the PID.
     */
    String pid = ManagementFactory.getRuntimeMXBean().getName();
    String strA[] = pid.split("@");
    pid = strA[0]; // keep only the numeric PID portion
    // parent PID comes from a system property set by the launcher; "0" if absent
    String ppid = System.getProperty("ppid", "0");
    addCSv(pid, ppid, objOptions.operation.Value());
    // source connection: host name, resolved address, local OS user
    SOSConnection2OptionsAlternate objS = objOptions.getConnectionOptions().Source();
    addCSv(objS.host.Value(), objS.host.getHostAdress(), System.getProperty("user.name"));
    // target connection: host name and resolved address
    SOSConnection2OptionsAlternate objT = objOptions.getConnectionOptions().Target();
    addCSv(objT.host.Value(), objT.host.getHostAdress());
    // remote user, flagged when the alternative connection options were used
    String remote_user = "";
    if (objT.AlternateOptionsUsed.value() == true) {
        remote_user = "(alternative) " + objT.Alternatives().user.Value();
    } else {
        remote_user = objT.user.Value();
    }
    addCSv(remote_user, objT.protocol.Value(), objT.port.Value(), objOptions.local_dir.Value(),
            objOptions.TargetDir.Value(), strSourceFileName);
    // remote (target) file name; "n.a." when no target name was recorded
    String remote_filename = strTargetFileName;
    if (isEmpty(remote_filename)) {
        remote_filename = "n.a.";
    }
    addCSv(remote_filename, String.valueOf(lngFileSize), strMD5Hash, eTransferStatus.name());
    // last error column is currently always empty; the original lookup is kept
    // below for reference
    String last_error_message = "";
    // last_error_message = clearCRLF(((getLogger().getError() != null && getLogger().getError().length() > 0) ? getLogger().getError()
    //         : getLogger().getWarning())); // 15- last_error=|warn message
    // last_error_message = normalizedPassword(sosString.parseToString(last_error_message));
    addCSv(last_error_message, objOptions.log_filename.Value(), objOptions.jump_host.Value(),
            objOptions.jump_host.getHostAdress(), objOptions.jump_port.Value(),
            objOptions.jump_protocol.Value(), objOptions.jump_user.Value());
    // file modification time rendered through SOSOptionTime
    SOSOptionTime objModTime = new SOSOptionTime(null, null, null, "", "", false);
    objModTime.value(lngFileModDate);
    addCSv(objModTime.getTimeAsString(lngFileModDate));
    return strCSVRec;
}
From source file:com.evolveum.midpoint.web.util.WebMiscUtil.java
public static double getSystemLoad() { com.sun.management.OperatingSystemMXBean operatingSystemMXBean = (com.sun.management.OperatingSystemMXBean) ManagementFactory .getOperatingSystemMXBean(); RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean(); int availableProcessors = operatingSystemMXBean.getAvailableProcessors(); long prevUpTime = runtimeMXBean.getUptime(); long prevProcessCpuTime = operatingSystemMXBean.getProcessCpuTime(); try {/* www . j a va 2 s. c o m*/ Thread.sleep(150); } catch (Exception ignored) { //ignored } operatingSystemMXBean = (com.sun.management.OperatingSystemMXBean) ManagementFactory .getOperatingSystemMXBean(); long upTime = runtimeMXBean.getUptime(); long processCpuTime = operatingSystemMXBean.getProcessCpuTime(); long elapsedCpu = processCpuTime - prevProcessCpuTime; long elapsedTime = upTime - prevUpTime; double cpuUsage = Math.min(99F, elapsedCpu / (elapsedTime * 10000F * availableProcessors)); return cpuUsage; }
From source file:edu.wustl.lookingglass.community.CommunityRepository.java
private void lockRepo() throws IOException { this.syncLockChannel = FileChannel.open(Paths.get(syncLockPath), StandardOpenOption.WRITE, StandardOpenOption.CREATE); FileLock lock = this.syncLockChannel.lock(); // gets an exclusive lock assert lock.isValid(); this.syncLockChannel.write(ByteBuffer.wrap(ManagementFactory.getRuntimeMXBean().getName().getBytes())); }
From source file:org.apache.hadoop.hbase.regionserver.MemStore.java
/**
 * Code to help figure if our approximation of object heap sizes is close
 * enough. See hbase-900. Fills memstores then waits so user can heap
 * dump and bring up resultant hprof in something like jprofiler which
 * allows you get 'deep size' on objects.
 *
 * @param args main args (unused)
 */
public static void main(String[] args) {
    // Log JVM identity so heap-dump figures can be tied to a specific VM build.
    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
    LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" + runtime.getVmVendor() + ", vmVersion="
            + runtime.getVmVersion());
    LOG.info("vmInputArguments=" + runtime.getInputArguments());
    MemStore memstore1 = new MemStore();
    // TODO: x32 vs x64
    long size = 0;
    final int count = 10000;
    byte[] fam = Bytes.toBytes("col");
    byte[] qf = Bytes.toBytes("umn");
    byte[] empty = new byte[0];
    for (int i = 0; i < count; i++) {
        // Give each its own ts
        size += memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty));
    }
    LOG.info("memstore1 estimated size=" + size);
    // Second pass with identical rows: measures growth when re-adding same data.
    for (int i = 0; i < count; i++) {
        size += memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty));
    }
    LOG.info("memstore1 estimated size (2nd loading of same data)=" + size);
    // Make a variably sized memstore.
    MemStore memstore2 = new MemStore();
    for (int i = 0; i < count; i++) {
        size += memstore2.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, new byte[i]));
    }
    LOG.info("memstore2 estimated size=" + size);
    final int seconds = 30;
    LOG.info("Waiting " + seconds + " seconds while heap dump is taken");
    for (int i = 0; i < seconds; i++) {
        // Thread.sleep(1000);
        // NOTE(review): the sleep is commented out, so this loop finishes
        // instantly instead of actually waiting the advertised 30 seconds —
        // confirm whether the pause was disabled intentionally.
    }
    LOG.info("Exiting.");
}
From source file:io.pcp.parfait.dxm.PcpMmvWriter.java
/** * @return the PID of the current running Java Process, or a proxied PID if requested. *///from w w w . j a va 2s. com private int getProcessIdentifier() { if (processIdentifier == 0) { String processName = ManagementFactory.getRuntimeMXBean().getName(); processIdentifier = Integer.valueOf(processName.split("@")[0]); } return processIdentifier; }
From source file:net.greghaines.jesque.worker.WorkerImpl.java
/** * Creates a unique name, suitable for use with Resque. * /* w w w. j a va 2 s . com*/ * @return a unique name for this worker */ protected String createName() { final StringBuilder buf = new StringBuilder(128); try { buf.append(InetAddress.getLocalHost().getHostName()).append(COLON) .append(ManagementFactory.getRuntimeMXBean().getName().split("@")[0]) // PID .append('-').append(this.workerId).append(COLON).append(JAVA_DYNAMIC_QUEUES); for (final String queueName : this.queueNames) { buf.append(',').append(queueName); } } catch (UnknownHostException uhe) { throw new RuntimeException(uhe); } return buf.toString(); }
From source file:org.apache.hadoop.hive.ql.session.SessionState.java
/**
 * Create dirs & session paths for this session:
 * 1. HDFS scratch dir
 * 2. Local scratch dir
 * 3. Local downloaded resource dir
 * 4. HDFS session path
 * 5. hold a lock file in HDFS session dir to indicate that it is in use
 * 6. Local session path
 * 7. HDFS temp table space
 * @param userName owner of the per-user HDFS scratch dir (step 1)
 * @throws IOException if any directory or lock file cannot be created
 */
private void createSessionDirs(String userName) throws IOException {
    HiveConf conf = getConf();
    Path rootHDFSDirPath = createRootHDFSDir(conf);
    // Now create session specific dirs
    String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
    Path path;
    // 1. HDFS scratch dir
    path = new Path(rootHDFSDirPath, userName);
    hdfsScratchDirURIString = path.toUri().toString();
    createPath(conf, path, scratchDirPermission, false, false);
    // 2. Local scratch dir
    path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR));
    createPath(conf, path, scratchDirPermission, true, false);
    // 3. Download resources dir
    path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR));
    createPath(conf, path, scratchDirPermission, true, false);
    // Finally, create session paths for this session
    // Local & non-local tmp location is configurable. however it is the same across
    // all external file systems
    String sessionId = getSessionId();
    // 4. HDFS session path
    hdfsSessionPath = new Path(hdfsScratchDirURIString, sessionId);
    createPath(conf, hdfsSessionPath, scratchDirPermission, false, true);
    conf.set(HDFS_SESSION_PATH_KEY, hdfsSessionPath.toUri().toString());
    // 5. hold a lock file in HDFS session dir to indicate that it is in use
    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK)) {
        FileSystem fs = hdfsSessionPath.getFileSystem(conf);
        // Info file records the owning process ("pid@hostname" per HotSpot
        // convention from RuntimeMXBean.getName()) for debugging stale locks.
        FSDataOutputStream hdfsSessionPathInfoFile = fs.create(new Path(hdfsSessionPath, INFO_FILE_NAME),
                true);
        hdfsSessionPathInfoFile.writeUTF("process: " + ManagementFactory.getRuntimeMXBean().getName() + "\n");
        hdfsSessionPathInfoFile.close();
        hdfsSessionPathLockFile = fs.create(new Path(hdfsSessionPath, LOCK_FILE_NAME), true);
    }
    // 6. Local session path
    localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), sessionId);
    createPath(conf, localSessionPath, scratchDirPermission, true, true);
    conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString());
    // 7. HDFS temp table space
    hdfsTmpTableSpace = new Path(hdfsSessionPath, TMP_PREFIX);
    // This is a sub-dir under the hdfsSessionPath. Will be removed along with that dir.
    // Don't register with deleteOnExit
    createPath(conf, hdfsTmpTableSpace, scratchDirPermission, false, false);
    conf.set(TMP_TABLE_SPACE_KEY, hdfsTmpTableSpace.toUri().toString());
}
From source file:org.apache.hadoop.hbase.regionserver.DefaultMemStore.java
/**
 * Code to help figure if our approximation of object heap sizes is close
 * enough. See hbase-900. Fills memstores then waits so user can heap
 * dump and bring up resultant hprof in something like jprofiler which
 * allows you get 'deep size' on objects.
 *
 * @param args main args (unused)
 */
public static void main(String[] args) {
    // Log JVM identity so heap-dump figures can be tied to a specific VM build.
    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
    LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" + runtime.getVmVendor() + ", vmVersion="
            + runtime.getVmVersion());
    LOG.info("vmInputArguments=" + runtime.getInputArguments());
    DefaultMemStore memstore1 = new DefaultMemStore();
    // TODO: x32 vs x64
    long size = 0;
    final int count = 10000;
    byte[] fam = Bytes.toBytes("col");
    byte[] qf = Bytes.toBytes("umn");
    byte[] empty = new byte[0];
    for (int i = 0; i < count; i++) {
        // Give each its own ts
        size += memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty));
    }
    LOG.info("memstore1 estimated size=" + size);
    // Second pass with identical rows: measures growth when re-adding same data.
    for (int i = 0; i < count; i++) {
        size += memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty));
    }
    LOG.info("memstore1 estimated size (2nd loading of same data)=" + size);
    // Make a variably sized memstore.
    DefaultMemStore memstore2 = new DefaultMemStore();
    for (int i = 0; i < count; i++) {
        size += memstore2.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, new byte[i]));
    }
    LOG.info("memstore2 estimated size=" + size);
    final int seconds = 30;
    LOG.info("Waiting " + seconds + " seconds while heap dump is taken");
    for (int i = 0; i < seconds; i++) {
        // Thread.sleep(1000);
        // NOTE(review): the sleep is commented out, so this loop finishes
        // instantly instead of actually waiting the advertised 30 seconds —
        // confirm whether the pause was disabled intentionally.
    }
    LOG.info("Exiting.");
}
From source file:org.apache.storm.utils.Utils.java
/** * @return the pid of this JVM, because Java doesn't provide a real way to do this. */// w w w .j a v a 2 s .com public static String processPid() { String name = ManagementFactory.getRuntimeMXBean().getName(); String[] split = name.split("@"); if (split.length != 2) { throw new RuntimeException("Got unexpected process name: " + name); } return split[0]; }