List of usage examples for the java.lang.System.gc() method.
Method signature: public static void gc()
From source file: DIA_Umpire_To_Skyline.FileThread.java
/**
 * Generates Skyline-importable files for this DIA mzXML run: verifies the
 * three isolation-window mzXML files exist, builds the raw MGF output (if not
 * already present), converts it with msconvert, and rewrites the scan titles
 * in the pepXML. Failures are logged rather than propagated; unrecoverable
 * setup errors call System.exit(1).
 */
public void GenerateSkylineFiles() {
    try {
        long time = System.currentTimeMillis();
        DIAPack DiaFile = new DIAPack(mzXMLFile, NoCPUs);
        // All three quality-tier mzXML files must already exist; fail fast otherwise.
        // (Original used non-short-circuit '|'; '||' avoids needless file-system probes.)
        String basePath = FilenameUtils.getFullPath(DiaFile.Filename);
        if (!new File(basePath + DiaFile.GetQ1Name() + ".mzXML").exists()
                || !new File(basePath + DiaFile.GetQ2Name() + ".mzXML").exists()
                || !new File(basePath + DiaFile.GetQ3Name() + ".mzXML").exists()) {
            throw new RuntimeException("Missing Q1/Q2/Q3 mzXML file for " + DiaFile.Filename);
        }
        Logger.getRootLogger().info(
                "=================================================================================================");
        Logger.getRootLogger().info("Processing " + mzXMLFile);
        if (!DiaFile.RawMGFExist()) {
            if (!DiaFile.LoadDIASetting()) {
                Logger.getRootLogger().info("Loading DIA setting failed, job is incomplete");
                System.exit(1);
            }
            if (!DiaFile.LoadParams()) {
                Logger.getRootLogger().info("Loading parameters failed, job is incomplete");
                System.exit(1);
            }
            DiaFile.BuildStructure();
            if (!DiaFile.MS1FeatureMap.ReadPeakCluster()) {
                Logger.getRootLogger().info("Loading peak and structure failed, job is incomplete");
                System.exit(1);
            }
            DiaFile.CreateSkylingImportFolder();
            DiaFile.GenerateRawMGF();
            DiaFile.ClearStructure();
        }
        DiaFile.ConvertRawMGF(msconvertpath);
        ChangeScanTitlePepXML();
        // Drop the (large) in-memory representation; System.gc() is a best-effort hint.
        DiaFile = null;
        System.gc();
        time = System.currentTimeMillis() - time;
        Logger.getRootLogger()
                .info(mzXMLFile + " processed time:"
                        + String.format("%d hour, %d min, %d sec", TimeUnit.MILLISECONDS.toHours(time),
                                TimeUnit.MILLISECONDS.toMinutes(time)
                                        - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(time)),
                                TimeUnit.MILLISECONDS.toSeconds(time)
                                        - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(time))));
    } catch (Exception ex) {
        Logger.getRootLogger().error(ExceptionUtils.getStackTrace(ex));
    }
}
From source file: com.ibuildapp.romanblack.CataloguePlugin.imageloader.Utils.java
public static Bitmap processBitmap(String fileName, Bitmap.Config config, int widthLimit) { Bitmap bitmap = null;/*from www .j av a2 s . c om*/ try { File tempFile = new File(fileName); BitmapFactory.Options opts = new BitmapFactory.Options(); BufferedInputStream fileInputStream = new BufferedInputStream(new FileInputStream(tempFile)); opts.inJustDecodeBounds = true; BitmapFactory.decodeStream(fileInputStream, null, opts); fileInputStream.close(); fileInputStream = new BufferedInputStream(new FileInputStream(tempFile)); //Find the correct scale value. It should be the power of 2. int width = opts.outWidth; int scale = 1; while (true) { int halfWidth = width / 2; if (halfWidth < widthLimit && (widthLimit - halfWidth) > widthLimit / 4) break; width = halfWidth; scale *= 2; } opts = new BitmapFactory.Options(); opts.inSampleSize = scale; opts.inPreferredConfig = config; try { System.gc(); bitmap = BitmapFactory.decodeStream(fileInputStream, null, opts); if (bitmap != null) return bitmap; } catch (Exception ex) { } catch (OutOfMemoryError e) { } fileInputStream.close(); fileInputStream = new BufferedInputStream(new FileInputStream(tempFile)); try { Thread.sleep(300); } catch (InterruptedException e) { e.printStackTrace(); } try { System.gc(); bitmap = BitmapFactory.decodeStream(fileInputStream, null, opts); if (bitmap != null) return bitmap; } catch (Exception ex) { } catch (OutOfMemoryError ex) { } fileInputStream.close(); fileInputStream = new BufferedInputStream(new FileInputStream(tempFile)); try { Thread.sleep(300); } catch (InterruptedException e) { e.printStackTrace(); } try { System.gc(); bitmap = BitmapFactory.decodeStream(fileInputStream, null, opts); } catch (Exception ex) { } catch (OutOfMemoryError ex) { } fileInputStream.close(); } catch (Exception exception) { exception.printStackTrace(); } return bitmap; }
From source file: net.sf.ehcache.AbstractCacheTest.java
/**
 * Measures the heap memory currently in use by the VM.
 * Runs the garbage collector twice, with a two-second pause in between,
 * so the heap has settled before it is sampled.
 *
 * @return used heap bytes (total memory minus free memory)
 * @throws InterruptedException if the settling sleep is interrupted
 */
protected long measureMemoryUse() throws InterruptedException {
    System.gc();
    Thread.sleep(2000);
    System.gc();
    Runtime runtime = Runtime.getRuntime();
    return runtime.totalMemory() - runtime.freeMemory();
}
From source file: net.skyebook.zerocollada.ZeroCollada.java
private static void doRequestedAction(File file, CommandLine cmd) throws IOException, JDOMException { // what is the operation? if (cmd.hasOption(ZCOpts.transform)) { // Do a transform that brings us as close to zero as possible Document dom = createDocument(file); ClosestToOriginTransformer ct = new ClosestToOriginTransformer(dom, cmd.hasOption(ZCOpts.includeX), cmd.hasOption(ZCOpts.includeY), cmd.hasOption(ZCOpts.includeZ)); file = removeOldXYZTag(file);/*from w w w .jav a 2 s . co m*/ ct.writeColladaToFile(newFilename(file, ct)); // recollect the resources ct = null; dom = null; System.gc(); } else if (cmd.hasOption(ZCOpts.translate)) { cmd.getOptionValues(ZCOpts.translate); //System.out.println("Args: " + cmd.getOptionValues(ZCOpts.translate).length); if (cmd.getOptionValues(ZCOpts.translate).length == 0) { showHelp(); } else { Translation translation = new Translation(createDocument(file)); for (int i = 0; i < cmd.getOptionValues(ZCOpts.translate).length; i++) { String arg = cmd.getOptionValues(ZCOpts.translate)[i]; if (arg.equals("zerowithname")) { translation.zeroTranslation(cmd.getOptionValues(ZCOpts.translate)[i + 1]); } else if (arg.equals("zerowithoutname")) { translation.zeroAllTranslationsExcept(cmd.getOptionValues(ZCOpts.translate)[i + 1]); } } translation.writeColladaToFile(newFilename(file, translation)); } } }
From source file: eu.stratosphere.test.util.TestBase.java
/**
 * Tears down the test cluster after each test: stops the cluster, removes its
 * pooled configuration entry, closes all cached file systems, and then asks
 * the VM to garbage-collect so the next test starts from a cleaner heap.
 * The order matters: the cluster must stop before its pool entry is removed.
 */
@After
public void stopCluster() throws Exception {
    cluster.stopCluster();
    ClusterProviderPool.removeInstance(clusterConfig);
    FileSystem.closeAll();
    // Best-effort hint only; the JVM may ignore it.
    System.gc();
}
From source file: com.edduarte.argus.util.PluginLoader.java
/**
 * Loads a plugin class from the given class file using a fresh
 * {@link CustomClassLoader}.
 * <p>
 * Improvements over the original: the class bytes are read with
 * {@code Files.readAllBytes} instead of a byte-at-a-time loop over an
 * unclosed URLConnection stream (which leaked the stream if a read failed).
 *
 * @param pluginFile path to the compiled plugin .class file
 * @return the defined class, or null if it references classes that are not
 *         on the classpath (NoClassDefFoundError)
 * @throws IOException if the class file cannot be read
 */
private static Class loadPlugin(Path pluginFile) throws ClassNotFoundException, IOException {
    CustomClassLoader loader = new CustomClassLoader();
    byte[] classData = Files.readAllBytes(pluginFile);
    Class loadedClass;
    try {
        loadedClass = loader.defineClass(classData);
    } catch (NoClassDefFoundError ex) {
        // Plugin references a class not on the classpath; treat as "not loadable".
        loadedClass = null;
    }
    loader.clearAssertionStatus();
    // Drop the loader reference; gc() is a best-effort hint to unload it.
    loader = null;
    System.gc();
    return loadedClass;
}
From source file: Maze.java
/**
 * MIDlet lifecycle callback: cleans up before the application is destroyed.
 * Drops the canvas reference and hints the VM to garbage-collect, which
 * matters on memory-constrained Java ME devices.
 *
 * @param unconditional whether the MIDlet must be destroyed regardless of state
 * @throws MIDletStateChangeException declared by the lifecycle contract
 */
public void destroyApp(boolean unconditional) throws MIDletStateChangeException {
    myCanvas = null;
    System.gc();
}
From source file: com.thesmartweb.swebrank.LinksParseAnalysis.java
/** * Method that exports the content from the urls provided and stores it in the ElasticSearch cluster of ours in a specific index * and calls the Semantic Analysis algorithm selected. Until now the method exports content from: * -html/*from w w w .j a v a2 s .c o m*/ * -youtube videos * -pdf files * @param total_links It contains all the links that we are going to analyze * @param domain The domain that we analyze * @param engine The search engine that we analyze the results from * @param example_dir It contains the directory where to save the results of the analysis * @param quer It contains the query for which the urls were the results for (it is used for the creation of the id in elasticsearch) * @param nTopics The number of topics for Latent Dirichlet Allocation * @param alpha The alpha value of LDA * @param beta The beta value of LDA * @param niters The number of iterations of LDA * @param top_words The amount of top words per topic to keep for LDA * @param LDAflag Flag if LDA is used * @param TFIDFflag Flag if TFIDF is used * @param config_path the path that contains the configuration files * @return the parsed output for each url provided */ public String[] perform(String[] total_links, String domain, String engine, String example_dir, String quer, int nTopics, double alpha, double beta, int niters, int top_words, boolean LDAflag, boolean TFIDFflag, String config_path) { String[] parse_output = new String[total_links.length]; try { System.gc(); WebParser web = new WebParser();//our web parser APIconn apicon = new APIconn();//our instance to check the connection to a url int counter_LDA_documents = 0; Settings settings = ImmutableSettings.settingsBuilder().put("cluster.name", "lshrankldacluster") .build(); Client client = new TransportClient(settings) .addTransportAddress(new InetSocketTransportAddress("localhost", 9300)); //Node node = nodeBuilder().client(true).clusterName("lshrankldacluster").node();//our elasticsearch node builder //Client client = 
node.client();//the client for elasticsearch node for (int i = 0; i < (total_links.length); i++) { parse_output[i] = ""; if (total_links[i] != null) { System.out.println("Link: " + total_links[i] + "\n"); DataManipulation dm = new DataManipulation(); boolean structuredFiled = dm.StructuredFileCheck(total_links[i]);//we check if the url contains a structured document file type if (!apicon.check_conn(total_links[i]).contains("ok-conn") || structuredFiled || total_links[i].contains("http://www.youtube.com/watch?")) { if (total_links[i].contains("http://www.youtube.com/watch?")) {//if the link is a youtube link we have to treat its JSON differently String ventry = total_links[i].substring(31); JSONparsing ypr = new JSONparsing(); url_check = total_links[i]; File current_url = new File(example_dir + engine + "/" + i + "/" + "current_url.txt"); FileUtils.writeStringToFile(current_url, url_check); parse_output[i] = ypr.GetYoutubeDetails(ventry).replace("\n", "").replace("r", ""); System.gc(); if (parse_output[i] != null) { counter_LDA_documents++; String directory = example_dir + engine + "/" + i + "/"; File file_content_lda = new File(directory + "youtube_content.txt"); FileUtils.writeStringToFile(file_content_lda, parse_output[i]); } } if (total_links[i].contains(".pdf")) {//if the link has a pdf we use Snowtide Pdf reader url_check = total_links[i]; File current_url = new File(example_dir + engine + "/" + i + "/" + "current_url.txt"); FileUtils.writeStringToFile(current_url, url_check); File current_pdf = new File(example_dir + engine + "/" + i + "/" + "current_pdf.txt"); URL URLlink = new URL(url_check); FileUtils.copyURLToFile(URLlink, current_pdf); Document pdf = PDF.open(current_pdf); StringWriter buffer = new StringWriter(); pdf.pipe(new OutputTarget(buffer)); pdf.close(); parse_output[i] = buffer.toString().replace("\n", "").replace("\r", ""); Stopwords stopwords = new Stopwords(); parse_output[i] = stopwords.stop(parse_output[i]); System.gc(); boolean 
deleteQuietly = FileUtils.deleteQuietly(current_pdf);//we delete the file after we read it if (parse_output[i] != null) { counter_LDA_documents++; String directory = example_dir + engine + "/" + i + "/"; File file_content_lda = new File(directory + "pdf_content.txt"); FileUtils.writeStringToFile(file_content_lda, parse_output[i]); } } } else {//if the link does not follow to the cases above, we parse it using WebParser int number = i; String directory = example_dir + engine + "/" + number + "/"; System.out.println("Link:" + total_links[i] + "\n"); url_check = total_links[i]; File current_url = new File(directory + "current_url.txt"); FileUtils.writeStringToFile(current_url, url_check); System.gc(); parse_output[i] = web.Parse(url_check);//we call the parser System.gc(); if (parse_output[i] != null) { counter_LDA_documents++;//we count the amount of documents, as it is needed for JGibbLDA as seen in http://jgibblda.sourceforge.net/#2.3._Input_Data_Format directory = example_dir + engine + "/" + i + "/"; //write the output from the html parsing File file_content_lda = new File(directory + "html_parse_content.txt"); FileUtils.writeStringToFile(file_content_lda, parse_output[i]); } } JSONObject obj = new JSONObject();//an object to save the parsed content in elasticsearch obj.put("ParsedContent", parse_output[i]); String id = domain + "/" + quer + "/" + engine + "/" + total_links[i]; ReadInput ri = new ReadInput(); List<String> elasticIndexes = ri.GetKeyFile(config_path, "elasticSearchIndexes"); IndexRequest indexReq = new IndexRequest(elasticIndexes.get(3), "content", id); indexReq.source(obj); IndexResponse indexRes = client.index(indexReq).actionGet(); } } //node.close(); client.close(); String output_string_content = Integer.toString(counter_LDA_documents); TwitterAnalysis tw = new TwitterAnalysis();//we are going gather info from Twitter using Twitter4j String twitter_txt = tw.perform(quer, config_path); for (int i = 0; i < parse_output.length; i++) {//we combine 
all the parsed content into one document if (parse_output[i] != null) { output_string_content = output_string_content + "\n" + parse_output[i]; } } if (!(twitter_txt.equalsIgnoreCase("fail"))) { output_string_content = output_string_content + "\n" + twitter_txt;//we add also the twitter content } String directory = example_dir + engine + "/"; //call LDA File file_content_lda = new File(directory + "content_for_analysis.txt");//we are going to save the content also in txt format for backup and usage for LDA FileUtils.writeStringToFile(file_content_lda, output_string_content); if (LDAflag) { LDAcall ld = new LDAcall();//we call lda ld.call(nTopics, alpha, beta, niters, top_words, directory); } else if (TFIDFflag) { TFIDF tf = new TFIDF();//we call TFIDF topWordsTFIDF = tf.compute(parse_output, top_words, example_dir); } return parse_output; } catch (IOException | ElasticsearchException | ArrayIndexOutOfBoundsException ex) { Logger.getLogger(LinksParseAnalysis.class.getName()).log(Level.SEVERE, null, ex); return parse_output; } }
From source file: org.jfree.chart.demo.SerializationTest1.java
/**
 * Constructs a new demonstration application that serializes a freshly
 * created chart to a byte buffer, discards the original (with a System.gc()
 * to make the handoff explicit), deserializes it, and displays the copy.
 * <p>
 * Fix: the original caught any round-trip exception, printed it, and then
 * dereferenced the still-null deserialized chart — a guaranteed NPE. It now
 * fails fast with the original exception as cause. Streams are closed via
 * try-with-resources.
 *
 * @param title the frame title
 */
public SerializationTest1(final String title) {
    super(title);
    this.series = new TimeSeries("Random Data", Millisecond.class);
    TimeSeriesCollection dataset = new TimeSeriesCollection(this.series);
    JFreeChart chart = createChart(dataset);

    // SERIALIZE - DESERIALIZE for testing purposes
    JFreeChart deserializedChart;
    try {
        final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(buffer)) {
            out.writeObject(chart);
        }
        // Drop every reference to the original object graph so only the
        // deserialized copy survives; gc() makes the intent explicit.
        chart = null;
        dataset = null;
        this.series = null;
        System.gc();
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(buffer.toByteArray()))) {
            deserializedChart = (JFreeChart) in.readObject();
        }
    } catch (Exception e) {
        throw new IllegalStateException("Chart serialization round trip failed", e);
    }
    final TimeSeriesCollection c = (TimeSeriesCollection) deserializedChart.getXYPlot().getDataset();
    this.series = c.getSeries(0);
    // FINISHED TEST

    final ChartPanel chartPanel = new ChartPanel(deserializedChart);
    final JButton button = new JButton("Add New Data Item");
    button.setActionCommand("ADD_DATA");
    button.addActionListener(this);
    final JPanel content = new JPanel(new BorderLayout());
    content.add(chartPanel);
    content.add(button, BorderLayout.SOUTH);
    chartPanel.setPreferredSize(new java.awt.Dimension(500, 270));
    setContentPane(content);
}
From source file: com.ibuildapp.romanblack.CataloguePlugin.utils.Utils.java
/** * Opens Bitmap from file// www. j a v a 2s . c o m * * @param fileName - file path * @return */ public static Bitmap proccessBitmap(String fileName, Bitmap.Config config, int widthLimit) { Bitmap bitmap = null; File tempFile = null; BitmapFactory.Options opts = new BitmapFactory.Options(); try { // decode image with appropriate options tempFile = new File(fileName); opts.inJustDecodeBounds = true; BitmapFactory.decodeStream(new FileInputStream(tempFile), null, opts); } catch (Exception e) { } //Find the correct scale value. It should be the power of 2. int width = opts.outWidth, height = opts.outHeight; ; int scale = 1; while (true) { if (width / 2 <= widthLimit || height / 2 <= widthLimit) { break; } width /= 2; height /= 2; scale *= 2; } opts = new BitmapFactory.Options(); opts.inSampleSize = scale; opts.inPreferredConfig = config; try { System.gc(); bitmap = BitmapFactory.decodeStream(new FileInputStream(tempFile), null, opts); if (bitmap != null) { return bitmap; } } catch (Exception ex) { } catch (OutOfMemoryError e) { } try { Thread.sleep(300); } catch (InterruptedException e) { e.printStackTrace(); } try { System.gc(); bitmap = BitmapFactory.decodeStream(new FileInputStream(tempFile), null, opts); if (bitmap != null) { return bitmap; } } catch (Exception ex) { } catch (OutOfMemoryError ex) { } try { Thread.sleep(300); } catch (InterruptedException e) { e.printStackTrace(); } try { System.gc(); bitmap = BitmapFactory.decodeStream(new FileInputStream(tempFile), null, opts); } catch (Exception ex) { } catch (OutOfMemoryError ex) { } return bitmap; }