List of usage examples for java.lang.Runtime.availableProcessors()
public native int availableProcessors();
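The most common use of availableProcessors(), visible in several of the examples below, is to size a worker pool to the number of cores the JVM reports. A minimal sketch of that pattern follows; the class name, task bodies, and shutdown timeout are illustrative placeholders, not taken from any of the examples on this page.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CoreSizedPool {
    public static void main(String[] args) throws InterruptedException {
        // Ask the JVM how many processors are currently available.
        int cores = Runtime.getRuntime().availableProcessors();

        // Size a fixed thread pool to that count and hand it some trivial tasks.
        ExecutorService pool = Executors.newFixedThreadPool(cores);
        for (int i = 0; i < cores; i++) {
            final int id = i;
            pool.submit(() -> System.out.println("worker " + id + " running"));
        }

        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}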
From source file:ai.susi.server.api.susi.StatusService.java
@Override
public JSONObject serviceImpl(Query post, HttpServletResponse response, Authorization rights,
        JsonObjectWithDefault permissions) throws APIException {

    post.setResponse(response, "application/javascript");

    // generate json
    Runtime runtime = Runtime.getRuntime();
    JSONObject json = new JSONObject(true);
    JSONObject system = new JSONObject(true);
    system.put("assigned_memory", runtime.maxMemory());
    system.put("used_memory", runtime.totalMemory() - runtime.freeMemory());
    system.put("available_memory", runtime.maxMemory() - runtime.totalMemory() + runtime.freeMemory());
    system.put("cores", runtime.availableProcessors());
    system.put("threads", Thread.activeCount());
    system.put("runtime", System.currentTimeMillis() - Caretaker.startupTime);
    system.put("load_system_average", OS.getSystemLoadAverage());
    system.put("load_system_cpu", OS.getSystemCpuLoad());
    system.put("load_process_cpu", OS.getProcessCpuLoad());
    system.put("server_threads", SusiServer.getServerThreads());
    system.put("server_uri", SusiServer.getServerURI());

    JSONObject index = new JSONObject(true);
    JSONObject messages = new JSONObject(true);
    JSONObject queue = new JSONObject(true);
    messages.put("queue", queue);
    JSONObject users = new JSONObject(true);
    JSONObject queries = new JSONObject(true);
    JSONObject accounts = new JSONObject(true);
    JSONObject user = new JSONObject(true);
    index.put("messages", messages);
    index.put("users", users);
    index.put("queries", queries);
    index.put("accounts", accounts);
    index.put("user", user);

    JSONObject client_info = new JSONObject(true);
    client_info.put("RemoteHost", post.getClientHost());
    client_info.put("IsLocalhost", post.isLocalhostAccess() ? "true" : "false");
    JSONObject request_header = new JSONObject(true);
    Enumeration<String> he = post.getRequest().getHeaderNames();
    while (he.hasMoreElements()) {
        String h = he.nextElement();
        request_header.put(h, post.getRequest().getHeader(h));
    }
    client_info.put("request_header", request_header);

    json.put("system", system);
    json.put("index", index);
    json.put("client_info", client_info);
    return json;
}
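Alongside the core count, this SUSI status handler derives its memory figures from the other Runtime accessors: used memory is the allocated heap minus its free portion, and available memory is the maximum heap minus that used amount. A standalone sketch of the same arithmetic, with an illustrative class name not taken from the SUSI code:

public class HeapFigures {
    public static void main(String[] args) {
        Runtime rt = Runtime.getRuntime();
        long assigned = rt.maxMemory();                                         // upper bound the heap may grow to
        long used = rt.totalMemory() - rt.freeMemory();                         // currently allocated and occupied
        long available = rt.maxMemory() - rt.totalMemory() + rt.freeMemory();   // remaining headroom
        System.out.println("assigned=" + assigned + " used=" + used + " available=" + available);
    }
}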
From source file:spade.storage.Neo4j.java
public static void index(String dbpath, boolean printProgress) {
    int totalThreads = Runtime.getRuntime().availableProcessors();
    final ConcurrentLinkedQueue<Node> nodeTaskQueue = new ConcurrentLinkedQueue<Node>();
    final ConcurrentLinkedQueue<Relationship> edgeTaskQueue = new ConcurrentLinkedQueue<Relationship>();
    final ReentrantReadWriteLock nodeRwlock = new ReentrantReadWriteLock();
    final ReentrantReadWriteLock edgeRwlock = new ReentrantReadWriteLock();
    final Index<Node> vertexIndex;
    final RelationshipIndex edgeIndex;

    System.out.println("Loading database...");
    File databaseFile = new File(dbpath);
    final GraphDatabaseService graphDb = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(databaseFile)
            .setConfig(GraphDatabaseSettings.pagecache_memory, "" + (Runtime.getRuntime().totalMemory() * 9) / 10)
            // .setConfig(GraphDatabaseSettings.keep_logical_logs, "false")
            .newGraphDatabase();
    System.out.println("Loaded");

    // clear already present indexes
    try (Transaction tx = graphDb.beginTx()) {
        graphDb.index().forNodes(spade.storage.Neo4j.VERTEX_INDEX).delete();
        tx.success();
    }
    try (Transaction tx = graphDb.beginTx()) {
        graphDb.index().forRelationships(spade.storage.Neo4j.EDGE_INDEX).delete();
        tx.success();
    }

    // System.out.println("Creating Indexing discriptors...");
    try (Transaction tx = graphDb.beginTx()) {
        vertexIndex = graphDb.index().forNodes(spade.storage.Neo4j.VERTEX_INDEX);
        tx.success();
    }
    try (Transaction tx = graphDb.beginTx()) {
        edgeIndex = graphDb.index().forRelationships(spade.storage.Neo4j.EDGE_INDEX);
        tx.success();
    }
    System.out.println("Created");

    class NodeIndexer implements Runnable {
        public void run() {
            Transaction tx = graphDb.beginTx();
            int counter = 0;
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    if (counter < 10000) {
                        Node node = nodeTaskQueue.poll();
                        if (node == null) {
                            continue;
                        }
                        for (String key : node.getPropertyKeys()) {
                            vertexIndex.add(node, key, (String) node.getProperty(key));
                        }
                        node.setProperty(ID_STRING, node.getId());
                        vertexIndex.add(node, ID_STRING, Long.toString(node.getId()));
                        counter++;
                    }
                    if (counter > 1000 && nodeRwlock.writeLock().tryLock()) {
                        tx.success();
                        tx.close();
                        tx = graphDb.beginTx();
                        nodeRwlock.writeLock().unlock();
                        counter = 0;
                    }
                }
            } finally {
                // tx.success();
                tx.close();
                if (nodeRwlock.writeLock().isHeldByCurrentThread()) {
                    nodeRwlock.writeLock().unlock();
                }
            }
        }
    }

    class RelationshipIndexer implements Runnable {
        public void run() {
            Transaction tx = graphDb.beginTx();
            int counter = 0;
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    if (counter < 10000) {
                        Relationship relationship = edgeTaskQueue.poll();
                        if (relationship == null) {
                            continue;
                        }
                        for (String key : relationship.getPropertyKeys()) {
                            edgeIndex.add(relationship, key, (String) relationship.getProperty(key));
                        }
                        relationship.setProperty(ID_STRING, relationship.getId());
                        edgeIndex.add(relationship, ID_STRING, Long.toString(relationship.getId()));
                        counter++;
                    }
                    if (counter > 1000 && edgeRwlock.writeLock().tryLock()) {
                        // tx.success();
                        tx.close();
                        tx = graphDb.beginTx();
                        edgeRwlock.writeLock().unlock();
                        counter = 0;
                    }
                }
            } finally {
                // tx.success();
                tx.close();
                if (edgeRwlock.writeLock().isHeldByCurrentThread()) {
                    edgeRwlock.writeLock().unlock();
                }
            }
        }
    }

    ArrayList<Thread> nodeWorkers = new ArrayList<Thread>();
    for (int i = 0; i < totalThreads / 2; i++) {
        Thread th = new Thread(new NodeIndexer());
        nodeWorkers.add(th);
        th.start();
    }
    ArrayList<Thread> edgeWorkers = new ArrayList<Thread>();
    for (int i = 0; i < totalThreads / 2; i++) {
        Thread th = new Thread(new RelationshipIndexer());
        edgeWorkers.add(th);
        th.start();
    }

    System.out.println("Counted Nodes and Relationships to index...");
    final long total;
    try (Transaction tx = graphDb.beginTx()) {
        total = Iterators.count(graphDb.getAllNodes().iterator())
                + Iterators.count(graphDb.getAllRelationships().iterator());
        tx.success();
    }
    System.out.println("done.\n");

    long percentageCompleted = 0;
    int count = 0;
    try (Transaction tx = graphDb.beginTx()) {
        // index nodes
        Iterator<Node> nodeIterator = graphDb.getAllNodes().iterator();
        Iterator<Relationship> edgeIterator = graphDb.getAllRelationships().iterator();
        while (edgeIterator.hasNext() || nodeIterator.hasNext()) {
            if (nodeIterator.hasNext() && nodeTaskQueue.size() < 10000) {
                nodeTaskQueue.add(nodeIterator.next());
                count = count + 1;
            }
            if (edgeIterator.hasNext() && edgeTaskQueue.size() < 10000) {
                edgeTaskQueue.add(edgeIterator.next());
                count = count + 1;
            }
            if (printProgress) {
                if (((count * 100) / total) > percentageCompleted) {
                    Runtime rt = Runtime.getRuntime();
                    long totalMemory = rt.totalMemory() / 1024 / 1024;
                    long freeMemory = rt.freeMemory() / 1024 / 1024;
                    long usedMemory = totalMemory - freeMemory;
                    System.out.print("| Cores: " + rt.availableProcessors()
                            + " | Threads: " + totalThreads
                            + " | Heap (MB) - total: " + totalMemory
                            + " , " + (freeMemory * 100) / totalMemory + "% free"
                            // + " | Total Objects (nodes + relationships) to Index: " + total
                            + " | Indexing Object (nodes + relationships): " + count + " / " + total
                            + " | Completed: " + percentageCompleted + " %"
                            + " |\r");
                }
                percentageCompleted = (count * 100) / total;
            }
        }
        tx.success();
    }

    System.out.println("\n\nIndexing completed. Waiting for queues to clear...");
    try {
        while (nodeTaskQueue.size() != 0 || edgeTaskQueue.size() != 0) {
            Thread.sleep(1000);
        }
    } catch (InterruptedException exception) {
    }

    System.out.println("Queues cleared. Threads teardown started...");
    for (int i = 0; i < totalThreads / 2; i++) {
        nodeWorkers.get(i).interrupt();
        try {
            nodeWorkers.get(i).join();
        } catch (InterruptedException exception) {
        }
    }
    for (int i = 0; i < totalThreads / 2; i++) {
        edgeWorkers.get(i).interrupt();
        try {
            edgeWorkers.get(i).join();
        } catch (InterruptedException exception) {
        }
    }

    System.out.println("Database shutdown started...");
    graphDb.shutdown();
}
From source file:com.tesora.dve.common.PatternLayoutWithHeader.java
@Override
public String getHeader() {
    Runtime runtime = Runtime.getRuntime();
    StringBuffer header = new StringBuffer();
    header.append(StringUtils.repeat("-", 80));
    header.append("\nLog Started : ").append(new Date().toString());
    header.append("\nBuild Info : ").append(PELogUtils.getBuildVersionString(true));
    header.append("\nMemory : max=").append(String.format("%,d", runtime.maxMemory()))
            .append(" total=").append(String.format("%,d", runtime.totalMemory()))
            .append(" free=").append(String.format("%,d", runtime.freeMemory()));
    header.append("\nProcessors : ").append(runtime.availableProcessors());
    try {
        header.append("\nHost : ").append(InetAddress.getLocalHost());
    } catch (UnknownHostException e) {
        header.append("\nHost : unknown");
    }
    header.append("\n");
    return header.toString();
}
From source file:org.loklak.api.admin.StatusService.java
@Override
public JSONObject serviceImpl(Query post, HttpServletResponse response, Authorization rights,
        JSONObjectWithDefault permissions) throws APIException {

    if (post.isLocalhostAccess() && OS.canExecUnix && post.get("upgrade", "").equals("true")) {
        Caretaker.upgrade(); // it's a hack to add this here, this may disappear anytime
    }

    final String backend = DAO.getConfig("backend", "");
    final boolean backend_push = DAO.getConfig("backend.push.enabled", false);
    JSONObject backend_status = null;
    JSONObject backend_status_index_sizes = null;
    if (backend.length() > 0 && !backend_push)
        try {
            backend_status = StatusService.status(backend);
            backend_status_index_sizes = backend_status == null ? null
                    : (JSONObject) backend_status.get("index_sizes");
        } catch (IOException e) {
        }
    long backend_messages = backend_status_index_sizes == null ? 0
            : ((Number) backend_status_index_sizes.get("messages")).longValue();
    long backend_users = backend_status_index_sizes == null ? 0
            : ((Number) backend_status_index_sizes.get("users")).longValue();
    long local_messages = DAO.countLocalMessages();
    long local_users = DAO.countLocalUsers();

    post.setResponse(response, "application/javascript");

    // generate json
    Runtime runtime = Runtime.getRuntime();
    JSONObject json = new JSONObject(true);
    JSONObject system = new JSONObject(true);
    system.put("assigned_memory", runtime.maxMemory());
    system.put("used_memory", runtime.totalMemory() - runtime.freeMemory());
    system.put("available_memory", runtime.maxMemory() - runtime.totalMemory() + runtime.freeMemory());
    system.put("cores", runtime.availableProcessors());
    system.put("threads", Thread.activeCount());
    system.put("runtime", System.currentTimeMillis() - Caretaker.startupTime);
    system.put("time_to_restart", Caretaker.upgradeTime - System.currentTimeMillis());
    system.put("load_system_average", OS.getSystemLoadAverage());
    system.put("load_system_cpu", OS.getSystemCpuLoad());
    system.put("load_process_cpu", OS.getProcessCpuLoad());
    system.put("server_threads", LoklakServer.getServerThreads());
    system.put("server_uri", LoklakServer.getServerURI());

    JSONObject index = new JSONObject(true);
    long countLocalMinMessages = DAO.countLocalMessages(60000L);
    long countLocal10MMessages = DAO.countLocalMessages(600000L);
    long countLocalHourMessages = DAO.countLocalMessages(3600000L);
    long countLocalDayMessages = DAO.countLocalMessages(86400000L);
    long countLocalWeekMessages = DAO.countLocalMessages(604800000L);
    float mps1m = countLocalMinMessages / 60f;
    float mps10m = countLocal10MMessages / 600f;
    float mps1h = countLocalHourMessages / 3600f;
    float mps1d = countLocalDayMessages / 86400f;
    float mps1w = countLocalWeekMessages / 604800f;
    index.put("mps1m", mps1m);
    index.put("mps10m", mps10m);
    index.put("mps1h", mps1h);
    index.put("mps1d", mps1d);
    index.put("mps1w", mps1w);
    index.put("mps", (int) Math.max(mps1d, Math.max(mps1h, Math.max(mps10m, mps1m)))); // best of 1d, 1h and 10m

    JSONObject messages = new JSONObject(true);
    messages.put("size", local_messages + backend_messages);
    messages.put("size_local", local_messages);
    messages.put("size_local_minute", countLocalMinMessages);
    messages.put("size_local_10minutes", countLocal10MMessages);
    messages.put("size_local_hour", countLocalHourMessages);
    messages.put("size_local_day", countLocalDayMessages);
    messages.put("size_local_week", countLocalWeekMessages);
    messages.put("size_backend", backend_messages);
    messages.put("stats", DAO.messages.getStats());

    JSONObject queue = new JSONObject(true);
    queue.put("size", QueuedIndexing.getMessageQueueSize());
    queue.put("maxSize", QueuedIndexing.getMessageQueueMaxSize());
    queue.put("clients", QueuedIndexing.getMessageQueueClients());
    messages.put("queue", queue);

    JSONObject users = new JSONObject(true);
    users.put("size", local_users + backend_users);
    users.put("size_local", local_users);
    users.put("size_backend", backend_users);
    users.put("stats", DAO.users.getStats());

    JSONObject queries = new JSONObject(true);
    queries.put("size", DAO.countLocalQueries());
    queries.put("stats", DAO.queries.getStats());

    JSONObject accounts = new JSONObject(true);
    accounts.put("size", DAO.countLocalAccounts());

    JSONObject user = new JSONObject(true);
    user.put("size", DAO.user_dump.size());

    JSONObject followers = new JSONObject(true);
    followers.put("size", DAO.followers_dump.size());

    JSONObject following = new JSONObject(true);
    following.put("size", DAO.following_dump.size());

    index.put("messages", messages);
    index.put("users", users);
    index.put("queries", queries);
    index.put("accounts", accounts);
    index.put("user", user);
    index.put("followers", followers);
    index.put("following", following);

    if (DAO.getConfig("retrieval.queries.enabled", false)) {
        List<QueryEntry> queryList = DAO.SearchLocalQueries("", 1000, "retrieval_next", "date", SortOrder.ASC,
                null, new Date(), "retrieval_next");
        index.put("queries_pending", queryList.size());
    }

    JSONObject client_info = new JSONObject(true);
    client_info.put("RemoteHost", post.getClientHost());
    client_info.put("IsLocalhost", post.isLocalhostAccess() ? "true" : "false");
    JSONObject request_header = new JSONObject(true);
    Enumeration<String> he = post.getRequest().getHeaderNames();
    while (he.hasMoreElements()) {
        String h = he.nextElement();
        request_header.put(h, post.getRequest().getHeader(h));
    }
    client_info.put("request_header", request_header);

    json.put("system", system);
    json.put("index", index);
    json.put("client_info", client_info);
    return json;
}
From source file:ome.services.util.JvmSettingsCheck.java
public JvmSettingsCheck() {
    final String fmt = "%s = %6s";
    final Runtime rt = Runtime.getRuntime();
    final int mb = 1024 * 1024;

    StringBuilder version = new StringBuilder();
    for (String key : new String[] { "java.version", "os.name", "os.arch", "os.version" }) {
        if (version.length() != 0) {
            version.append("; ");
        }
        version.append(System.getProperty(key));
    }

    log.info("Java version: " + version);
    log.info(String.format(fmt, "Max Memory (MB): ", (rt.maxMemory() / mb)));
    log.info(String.format(fmt, "OS Memory (MB): ", (getPhysicalMemory() / mb)));
    log.info(String.format(fmt, "Processors: ", rt.availableProcessors()));
}
From source file:org.deegree.tools.rendering.dem.filtering.DEMRasterFilterer.java
/**
 * @throws IOException
 * @throws InterruptedException
 * @throws Exception
 */
private void applyFilter() throws IOException, InterruptedException {
    Runtime rt = Runtime.getRuntime();
    int processors = rt.availableProcessors();
    LOG.info("Number of processors: {}", processors);

    // calculate the rows.
    RasterGeoReference geoRef = raster.getRasterReference();
    Envelope renv = raster.getEnvelope();
    RasterRect rect = geoRef.convertEnvelopeToRasterCRS(raster.getEnvelope());
    int width = raster.getColumns();
    int height = raster.getRows();
    int numberOfTiles = Rasters.calcApproxTiles(width, height, TILE_SIZE);
    int tileWidth = Rasters.calcTileSize(width, numberOfTiles);
    int tileHeight = Rasters.calcTileSize(height, numberOfTiles);
    int columns = (int) Math.ceil(((double) width) / tileWidth);
    int rows = (int) Math.ceil((double) height / tileHeight);
    GridWriter gridWriter = new GridWriter(columns, rows, renv, geoRef, tmpGridFile, raster.getRasterDataInfo());
    FilteredResultWiter resultWriter = new FilteredResultWiter(gridWriter);

    Stack<RasterFilterer> filters = new Stack<RasterFilterer>();
    String lock = "lock";
    for (int i = 0; i < processors; ++i) {
        RasterFilterer rf = new RasterFilterer(this.raster, kernelSize, resultWriter, stdCorr, lock, filters);
        filters.push(rf);
    }
    Thread outputThread = new Thread(resultWriter, "result writer");
    outputThread.start();

    LOG.info("Tiling raster of {} x {} pixels (width x height) into {} rows and {} columns.",
            new Object[] { rect.width, rect.height, rows, columns });
    int kernelHalf = (this.kernelSize - 1) / 2;
    long totalTime = currentTimeMillis();
    for (int row = 30; row < rows; ++row) {
        long currentTime = currentTimeMillis();
        for (int col = 0; col < columns; ++col) {
            RasterFilterer filterer = null;
            while (filterer == null) {
                synchronized (lock) {
                    if (filters.isEmpty()) {
                        lock.wait();
                    } else {
                        filterer = filters.pop();
                    }
                }
            }
            RasterRect outputRect = new RasterRect(((col * tileWidth) - kernelHalf),
                    ((row * tileHeight) - kernelHalf), tileWidth + this.kernelSize, tileHeight + this.kernelSize);
            filterer.setRasterInformation(outputRect);
            new Thread(filterer, "row_" + row + "_col_" + col).start();
        }
        double rPT = Math.round((Math.round((currentTimeMillis() - currentTime) / 10d) / 100d));
        if (row + 1 < rows) {
            double remain = rPT * (rows - (row + 1));
            LOG.info("Filtering row: {}, took approximately: {} seconds, estimated remaining time: {} seconds "
                    + ((remain > 60) ? "( {} minutes)." : "."),
                    new Object[] { (row + 1), rPT, remain, Math.round(remain / 60d) });
        }
        System.gc();
        RasterCache.dispose();
    }

    while (true) {
        synchronized (lock) {
            RasterCache.dispose();
            if (filters.size() < processors) {
                try {
                    // wait for all
                    lock.wait();
                } catch (InterruptedException e) {
                    LOG.error("Could not wait for all filter threads to end because: " + e.getLocalizedMessage(), e);
                }
            } else {
                break;
            }
        }
    }

    resultWriter.stop();
    // outputThread.interrupt();
    outputThread.join();
    gridWriter.writeMetadataFile(null);

    StringBuilder sb = new StringBuilder("Processing ");
    sb.append(rows).append(" rows and ");
    sb.append(columns).append(" columns of rasters with width: ");
    sb.append(tileWidth).append(" and height: ");
    sb.append(tileHeight).append(", took: ");
    sb.append((Math.round((currentTimeMillis() - totalTime) / 10d) / 100d)).append(" seconds");
    LOG.info(sb.toString());

    // now output the filtered tiles.
    outputTiles();
}
From source file:com.sds.acube.ndisc.mts.xserver.XNDiscServer.java
/**
 * XNDisc Server start message.
 *
 * @return the start-up banner message
 */
private String getXNDiscServerStartMessage() {
    StringBuilder smsg = new StringBuilder(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.rightPad("", 90, "-")).append("?").append(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.center("", 90, " ")).append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.center("XNDisc Server (Version : " + XNDISC_SERVER_VERSION + ")", 90, " "))
            .append("").append(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.center("", 90, " ")).append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.center("Listening on " + this.server.getLocalAddress().getHostName() + "("
                    + this.server.getLocalAddress().getHostAddress() + "):" + this.server.getLocalPort(), 90, " "))
            .append("").append(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.center("", 90, " ")).append("").append(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.center("Start on " + XNDiscUtils.getStartDate(), 90, " "))
            .append("").append(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.center("", 90, " ")).append("").append(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.center(" XNDisc Server Options ", 90, "-")).append("")
            .append(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.center("", 90, " ")).append("").append(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.rightPad(" Use SSL : " + Boolean.toString(USE_SSL), 90, " "))
            .append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.rightPad(" Dispatcher Initial Size : " + DISPATCHER_INIT_COUNT, 90, " "))
            .append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.rightPad(" Dispatcher maximum Handle Size : " + DISPATCHER_MAX_HANDLES, 90, " "))
            .append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.rightPad(" Read Buffer Use Direct : " + READ_BUFFER_USEDIRECT, 90, " "))
            .append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.rightPad(" Worker Pool Thread Priority : "
                    + XNDiscConfig.getString(XNDiscConfig.WORKER_POOL_PRIORITY, "NORM"), 90, " "))
            .append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.rightPad(" Worker Pool minimun Size : " + MIN_SIZE_WORKER_POOL, 90, " "))
            .append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.rightPad(" Worker Pool maximum Size : " + SIZE_WORKER_POOL, 90, " "))
            .append("").append(LINE_SEPERATOR);
    if (WORKER_POOL_TYPE.equals("F")) {
        smsg.append("").append(StringUtils.rightPad(" Thread Pool Type : Fixed", 90, " "))
                .append("").append(LINE_SEPERATOR);
        smsg.append("")
                .append(StringUtils.rightPad(" Thread Pool Size : " + WORKER_THREAD_POOL, 90, " "))
                .append("").append(LINE_SEPERATOR);
    } else if (WORKER_POOL_TYPE.equals("C")) {
        smsg.append("").append(StringUtils.rightPad(" Thread Pool Type : Cached", 90, " "))
                .append("").append(LINE_SEPERATOR);
    }
    smsg.append("").append(StringUtils.center("", 90, " ")).append("").append(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.rightPad("", 90, "-")).append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.rightPad(" OS Architecture : " + System.getProperty("os.arch"), 90, ""))
            .append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.rightPad(" OS Name : " + System.getProperty("os.name"), 90, ""))
            .append("").append(LINE_SEPERATOR);
    smsg.append("")
            .append(StringUtils.rightPad(" OS Version : " + System.getProperty("os.version"), 90, ""))
            .append("").append(LINE_SEPERATOR);
    Runtime runtime = Runtime.getRuntime();
    smsg.append("")
            .append(StringUtils.rightPad(" Processors : " + runtime.availableProcessors() + "(s)", 90, ""))
            .append("").append(LINE_SEPERATOR);
    smsg.append("").append(StringUtils.rightPad("", 90, "-")).append("").append(LINE_SEPERATOR);
    return smsg.toString();
}
From source file:com.prey.PreyPhone.java
private int getCpuCores() {
    Runtime runtime = Runtime.getRuntime();
    return runtime.availableProcessors();
}
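The Runtime javadoc notes that the value returned by availableProcessors() may change during a single invocation of the virtual machine, so a helper like getCpuCores() that re-reads it on each call stays current, while caching the value once trades freshness for one fewer native call. A small sketch contrasting the two approaches; the class and method names are illustrative, not part of the Prey code.

public final class CpuCores {
    // Cached once at class load; can become stale if processors go offline or come online later.
    private static final int CACHED_CORES = Runtime.getRuntime().availableProcessors();

    // Fresh value on every call, matching the getCpuCores() example above.
    public static int current() {
        return Runtime.getRuntime().availableProcessors();
    }

    public static int cached() {
        return CACHED_CORES;
    }
}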
From source file:edu.internet2.middleware.shibboleth.idp.StatusServlet.java
/**
 * Prints out information about the operating environment. This includes the operating system name, version and
 * architecture, the JDK version, available CPU cores, memory currently used by the JVM process, the maximum amount
 * of memory that may be used by the JVM, and the current time in UTC.
 *
 * @param out output writer to which information will be written
 */
protected void printOperatingEnvironmentInformation(PrintWriter out) {
    Runtime runtime = Runtime.getRuntime();
    DateTime now = new DateTime(ISOChronology.getInstanceUTC());

    out.println("### Operating Environment Information");
    out.println("operating_system: " + System.getProperty("os.name"));
    out.println("operating_system_version: " + System.getProperty("os.version"));
    out.println("operating_system_architecture: " + System.getProperty("os.arch"));
    out.println("jdk_version: " + System.getProperty("java.version"));
    out.println("available_cores: " + runtime.availableProcessors());
    out.println("used_memory: " + runtime.totalMemory() / 1048576 + "MB");
    out.println("maximum_memory: " + runtime.maxMemory() / 1048576 + "MB");
    out.println("start_time: " + startTime.toString(dateFormat));
    out.println("current_time: " + now.toString(dateFormat));
    out.println("uptime: " + (now.getMillis() - startTime.getMillis()) + "ms");
}
From source file:spade.utility.BitcoinTools.java
public void writeBlocksToCSV(int startIndex, int endIndex) {
    // Block block, int lastBlockId
    int lastBlockId = -1;
    final BitcoinTools bitcoinTools = new BitcoinTools();
    String pattern = "#.##";
    DecimalFormat decimalFormat = new DecimalFormat(pattern);
    final ConcurrentHashMap<Integer, Block> blockMap = new ConcurrentHashMap<Integer, Block>();
    final AtomicInteger currentBlock = new AtomicInteger(startIndex);
    final int stopIndex = endIndex;
    final int totalThreads = Runtime.getRuntime().availableProcessors();

    class BlockFetcher implements Runnable {
        public void run() {
            while (true) {
                if (blockMap.size() > totalThreads * 5) { // max objects to hold in memory max 1 MB * totalThreads * factor
                    try {
                        Thread.sleep(100);
                        continue;
                    } catch (Exception exception) {
                    }
                }
                int blockToFetch = currentBlock.getAndIncrement();
                try {
                    blockMap.put(blockToFetch, bitcoinTools.getBlock(blockToFetch));
                } catch (JSONException exception) {
                    Bitcoin.log(Level.SEVERE, "Block " + blockToFetch + " has invalid json. Redownloading.",
                            exception);
                    try {
                        blockMap.put(blockToFetch, bitcoinTools.getBlock(blockToFetch));
                    } catch (JSONException ex) {
                        Bitcoin.log(Level.SEVERE, "Block " + blockToFetch + " couldn't be included in CSV.", ex);
                    }
                }
                if (blockToFetch >= stopIndex) {
                    break;
                }
            }
        }
    }

    ArrayList<Thread> workers = new ArrayList<Thread>();
    for (int i = 0; i < totalThreads; i++) {
        Thread th = new Thread(new BlockFetcher());
        workers.add(th);
        th.start();
    }

    int percentageCompleted = 0;
    for (int i = startIndex; i < endIndex; i++) {
        try {
            Block block;
            while (!blockMap.containsKey(i)) {
            }
            block = blockMap.get(i);
            blockMap.remove(i);
            lastBlockId = writeBlockToCSV(block, lastBlockId);
            if ((((i - startIndex + 1) * 100) / (endIndex - startIndex)) > percentageCompleted) {
                Runtime rt = Runtime.getRuntime();
                long totalMemory = rt.totalMemory() / 1024 / 1024;
                long freeMemory = rt.freeMemory() / 1024 / 1024;
                long usedMemory = totalMemory - freeMemory;
                System.out.print("| Cores: " + rt.availableProcessors()
                        + " | Threads: " + totalThreads
                        + " | Heap (MB) - total: " + totalMemory
                        + ", %age free: " + (freeMemory * 100) / totalMemory
                        + " | At Block: " + (i - startIndex + 1) + " / " + (endIndex - startIndex)
                        + " | Percentage Completed: " + percentageCompleted
                        // + " |\r");
                        + " |\n");
            }
            percentageCompleted = ((i - startIndex + 1) * 100) / (endIndex - startIndex);
        } catch (IOException ex) {
            Bitcoin.log(Level.SEVERE, "Unexpected IOException. Stopping CSV creation.", ex);
            break;
        }
    }

    for (int i = 0; i < totalThreads; i++) {
        try {
            workers.get(i).interrupt();
            workers.get(i).join();
        } catch (InterruptedException exception) {
        }
    }
    System.out.println("\n\ndone with creating CSVes!");
}