List of usage examples for the java.io.BufferedWriter constructor
public BufferedWriter(Writer out)
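Before the longer real-world examples below, here is a minimal, self-contained sketch of the constructor on its own (the file name "out.txt" is only a placeholder):

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;

public class Example {
    public static void main(String[] args) throws IOException {
        // Wrap any Writer; try-with-resources closes the buffer and the
        // underlying FileWriter together ("out.txt" is just a placeholder).
        Writer out = new FileWriter("out.txt");
        try (BufferedWriter bw = new BufferedWriter(out)) {
            bw.write("hello");
            bw.newLine(); // writes the platform-specific line separator
        }
    }
}

Wrapping the FileWriter means each write() lands in an in-memory buffer and reaches the disk in larger chunks; close() flushes whatever remains.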
From source file:TwitterClustering.java
public static void main(String[] args) throws FileNotFoundException, IOException {
    File outFile = new File(args[3]);
    Scanner s = new Scanner(new File(args[1])).useDelimiter(",");
    JSONParser parser = new JSONParser();
    Set<Cluster> clusterSet = new HashSet<Cluster>();
    HashMap<String, Tweet> tweets = new HashMap<String, Tweet>();
    FileWriter fw = new FileWriter(outFile.getAbsoluteFile());
    BufferedWriter bw = new BufferedWriter(fw);
    try {
        // load tweets from the JSON input file
        Object obj = parser.parse(new FileReader(args[2]));
        JSONArray jsonArray = (JSONArray) obj;
        for (int i = 0; i < jsonArray.size(); i++) {
            Tweet twt = new Tweet();
            JSONObject jObj = (JSONObject) jsonArray.get(i);
            String text = jObj.get("text").toString();
            long sum = 0;
            for (int y = 0; y < text.toCharArray().length; y++) {
                sum += (int) text.toCharArray()[y];
            }
            String[] token = text.split(" ");
            String tID = jObj.get("id").toString();
            Set<String> mySet = new HashSet<String>(Arrays.asList(token));
            twt.setAttributeValue(sum);
            twt.setText(mySet);
            twt.setTweetID(tID);
            tweets.put(tID, twt);
        }
        // preparing initial clusters from the seed ids in args[1]
        int i = 0;
        while (s.hasNext()) {
            String id = s.next();
            Tweet t = tweets.get(id.trim());
            clusterSet.add(new Cluster(i + 1, t, new LinkedList<Tweet>()));
            i++;
        }
        for (int l = 0; l < 2; l++) { // limited to 2 iterations
            // a fresh iterator is needed on every pass; the original created it
            // once outside this loop, so only the first pass assigned members
            Iterator<Map.Entry<String, Tweet>> it = tweets.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, Tweet> me = it.next();
                // calculate distance to each centroid
                Tweet p = me.getValue();
                HashMap<Cluster, Float> distMap = new HashMap<Cluster, Float>();
                for (Cluster clust : clusterSet) {
                    distMap.put(clust, jaccardDistance(p.getText(), clust.getCentroid().getText()));
                }
                // relies on sortByValue returning an order-preserving map
                // (e.g. a LinkedHashMap) with the closest cluster first
                HashMap<Cluster, Float> sorted = (HashMap<Cluster, Float>) sortByValue(distMap);
                sorted.keySet().iterator().next().getMembers().add(p);
            }
            // calculate new centroid and update the cluster set
            for (Cluster clust : clusterSet) {
                TreeMap<String, Long> tDistMap = new TreeMap<String, Long>();
                Tweet newCentroid = null;
                long avgSumDist = 0;
                for (int j = 0; j < clust.getMembers().size(); j++) {
                    avgSumDist += clust.getMembers().get(j).getAttributeValue();
                    tDistMap.put(clust.getMembers().get(j).getTweetID(),
                            clust.getMembers().get(j).getAttributeValue());
                }
                if (clust.getMembers().size() != 0) {
                    avgSumDist /= clust.getMembers().size();
                }
                ArrayList<Long> listValues = new ArrayList<Long>(tDistMap.values());
                if (tDistMap.containsValue(findClosestNumber(listValues, avgSumDist))) {
                    // the member closest to the mean becomes the new centroid
                    newCentroid = tweets.get(getKeyByValue(tDistMap, findClosestNumber(listValues, avgSumDist)));
                    clust.setCentroid(newCentroid);
                }
            }
        }
        // write each cluster id followed by its member tweet ids
        for (Cluster c : clusterSet) {
            bw.write(c.getId() + "\t");
            System.out.print(c.getId() + "\t");
            for (Tweet t : c.getMembers()) {
                bw.write(t.getTweetID() + ", ");
                System.out.print(t.getTweetID() + ",");
            }
            bw.write("\n");
            System.out.println("");
        }
        System.out.println("");
        System.out.println("SSE " + sumSquaredErrror(clusterSet));
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        s.close();
        bw.close();
        fw.close();
    }
}
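The clustering example above depends on a jaccardDistance helper that is not part of the snippet. A minimal sketch of what such a helper could look like, assuming it returns 1 minus the Jaccard similarity of two token sets (this is a reconstruction, not the original code):

// Hypothetical reconstruction of the helper used above: Jaccard distance
// between two token sets, i.e. 1 - |A ∩ B| / |A ∪ B|.
static float jaccardDistance(Set<String> a, Set<String> b) {
    Set<String> intersection = new HashSet<String>(a);
    intersection.retainAll(b);
    Set<String> union = new HashSet<String>(a);
    union.addAll(b);
    if (union.isEmpty()) {
        return 0f; // treat two empty token sets as identical
    }
    return 1f - (float) intersection.size() / union.size();
}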
From source file:bookChapter.theoretical.AnalyzeTheoreticalMSMSCalculation.java
/**
 * @param args
 * @throws IOException
 * @throws FileNotFoundException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 * @throws MzMLUnmarshallerException
 */
public static void main(String[] args) throws IOException, FileNotFoundException, ClassNotFoundException,
        InterruptedException, MzMLUnmarshallerException {
    Logger l = Logger.getLogger("AnalyzeTheoreticalMSMSCalculation");
    Date date = Calendar.getInstance().getTime();
    DateFormat formatter = new SimpleDateFormat("EEEE, dd MMMM yyyy, hh:mm:ss.SSS a");
    String now = formatter.format(date);
    l.log(Level.INFO, "Calculation starts at {0}", now);
    double precursorTolerance = ConfigHolder.getInstance().getDouble("precursor.tolerance"),
            fragmentTolerance = ConfigHolder.getInstance().getDouble("fragment.tolerance");
    String databaseName = ConfigHolder.getInstance().getString("database.name"),
            spectraName = ConfigHolder.getInstance().getString("spectra.name"),
            output = ConfigHolder.getInstance().getString("output");
    int correctionFactor = ConfigHolder.getInstance().getInt("correctionFactor");
    boolean theoFromAllCharges = ConfigHolder.getInstance().getBoolean("hasAllPossCharge");
    BufferedWriter bw = new BufferedWriter(new FileWriter(output));
    bw.write("SpectrumTitle" + "\t" + "PrecursorMZ" + "\t" + "PrecursorCharge" + "\t"
            + "Observed Mass (M+H)" + "\t" + "AndromedaLikeScore" + "\t" + "SequestLikeScore" + "\t"
            + "PeptideByAndromedaLikeScore" + "\t" + "PeptideBySequestLikeScore" + "\t"
            + "LevenshteinDistance" + "\t" + "TotalScoredPeps" + "\t" + "isCorrectMatchByAndromedaLike"
            + "\t" + "isCorrectMatchBySequestLikeScore" + "\n");
    l.info("Getting database entries");
    // first load all sequences into memory
    HashSet<DBEntry> dbEntries = getDBEntries(databaseName);
    // then calculate both scores for every spectrum
    int num = 0;
    SpectrumFactory fct = SpectrumFactory.getInstance();
    File f = new File(spectraName);
    if (spectraName.endsWith(".mgf")) {
        fct.addSpectra(f, new WaitingHandlerCLIImpl());
        l.log(Level.INFO, "Spectra scoring starts at {0}", now);
        for (String title : fct.getSpectrumTitles(f.getName())) {
            num++;
            MSnSpectrum ms = (MSnSpectrum) fct.getSpectrum(f.getName(), title);
            // skip near-empty spectra (also guards spectra with negative values)
            if (ms.getPeakList().size() > 2) {
                String text = result(ms, precursorTolerance, dbEntries, fragmentTolerance,
                        correctionFactor, theoFromAllCharges);
                if (!text.isEmpty()) {
                    bw.write(text);
                }
            }
            if (num % 500 == 0) {
                l.info("Running " + num + " spectra." + Calendar.getInstance().getTime());
            }
        }
    }
    l.info("Program finished at " + Calendar.getInstance().getTime());
    bw.close();
}
From source file:com.cisco.dbds.utils.tims.TIMS.java
/**
 * The main method.
 *
 * @param args the arguments
 * @throws Exception the exception
 */
public static void main(String args[]) throws Exception {
    ConfigFileHandler.loadConfigFile("src/it/resources");
    // truncate the log file, then reopen it in append mode behind a buffer
    new FileOutputStream(fname).close();
    outlog = new PrintWriter(new BufferedWriter(new FileWriter(fname, true)));
    //updatetimsresult("Ttv9629533c", "passed");
    //initializeTIMSParameters(
    //        System.getProperty("tims.userID").trim(),
    //        System.getProperty("tims.projectID").trim(),
    //        System.getProperty("tims.configID").trim(),
    //        System.getProperty("tims.token").trim(),
    //        System.getProperty("tims.sw").trim(),
    //        System.getProperty("tims.platform").trim(),
    //        System.getProperty("tims.browsertype").trim(),
    //        System.getProperty("tims.browserversion").trim()
    //);
    //
    //postTIMSsearch("Ttv9629533c");
    updateHTMLtoTIMSresults();
    //ArrayList<String> re1 = new ArrayList<String>();
    //re1 = readhtmlfilePass();
    //System.out.println(re1.size() + " bvvv " + re1);
    //createtimsresult("Tl4352381c", "failed", "sample111555");
}
From source file:edu.cmu.lti.oaqa.annographix.apps.SolrQueryApp.java
public static void main(String[] args) {
    Options options = new Options();
    options.addOption("u", null, true, "Solr URI");
    options.addOption("q", null, true, "Query");
    options.addOption("n", null, true, "Max # of results");
    options.addOption("o", null, true, "An optional TREC-style output file");
    options.addOption("w", null, false, "Do a warm-up query call, before each query");
    CommandLineParser parser = new org.apache.commons.cli.GnuParser();
    BufferedWriter trecOutFile = null;
    try {
        CommandLine cmd = parser.parse(options, args);
        String queryFile = null, solrURI = null;
        if (cmd.hasOption("u")) {
            solrURI = cmd.getOptionValue("u");
        } else {
            Usage("Specify Solr URI");
        }
        SolrServerWrapper solr = new SolrServerWrapper(solrURI);
        if (cmd.hasOption("q")) {
            queryFile = cmd.getOptionValue("q");
        } else {
            Usage("Specify Query file");
        }
        int numRet = 100;
        if (cmd.hasOption("n")) {
            numRet = Integer.parseInt(cmd.getOptionValue("n"));
        }
        if (cmd.hasOption("o")) {
            trecOutFile = new BufferedWriter(new FileWriter(new File(cmd.getOptionValue("o"))));
        }
        List<String> fieldList = new ArrayList<String>();
        fieldList.add(UtilConst.ID_FIELD);
        fieldList.add(UtilConst.SCORE_FIELD);
        double totalTime = 0;
        double retQty = 0;
        ArrayList<Double> queryTimes = new ArrayList<Double>();
        boolean bDoWarmUp = cmd.hasOption("w");
        if (bDoWarmUp) {
            System.out.println("Using a warmup step!");
        }
        int queryQty = 0;
        for (String t : FileUtils.readLines(new File(queryFile))) {
            t = t.trim();
            if (t.isEmpty())
                continue;
            // each line is "<query id>|<query text>"
            int ind = t.indexOf('|');
            if (ind < 0)
                throw new Exception("Wrong format, line: '" + t + "'");
            String qID = t.substring(0, ind);
            String q = t.substring(ind + 1);
            SolrDocumentList res = null;
            if (bDoWarmUp) {
                res = solr.runQuery(q, fieldList, numRet);
            }
            Long tm1 = System.currentTimeMillis();
            res = solr.runQuery(q, fieldList, numRet);
            Long tm2 = System.currentTimeMillis();
            retQty += res.getNumFound();
            System.out.println(qID + " Obtained: " + res.getNumFound() + " entries in " + (tm2 - tm1) + " ms");
            double delta = (tm2 - tm1);
            totalTime += delta;
            queryTimes.add(delta);
            ++queryQty;
            if (trecOutFile != null) {
                ArrayList<SolrRes> resArr = new ArrayList<SolrRes>();
                for (SolrDocument doc : res) {
                    String id = (String) doc.getFieldValue(UtilConst.ID_FIELD);
                    float score = (Float) doc.getFieldValue(UtilConst.SCORE_FIELD);
                    resArr.add(new SolrRes(id, "", score));
                }
                SolrRes[] results = resArr.toArray(new SolrRes[resArr.size()]);
                Arrays.sort(results);
                SolrEvalUtils.saveTrecResults(qID, results, trecOutFile, TREC_RUN, results.length);
            }
        }
        double devTime = 0, meanTime = totalTime / queryQty;
        for (int i = 0; i < queryQty; ++i) {
            double d = queryTimes.get(i) - meanTime;
            devTime += d * d;
        }
        devTime = Math.sqrt(devTime / (queryQty - 1));
        System.out.println(String.format("Query time, mean/standard dev: %.2f/%.2f (ms)", meanTime, devTime));
        System.out.println(String.format("Avg # of docs returned: %.2f", retQty / queryQty));
        solr.close();
        // the original closed unconditionally, which throws a
        // NullPointerException when no "-o" output file was requested
        if (trecOutFile != null) {
            trecOutFile.close();
        }
    } catch (ParseException e) {
        Usage("Cannot parse arguments");
    } catch (Exception e) {
        System.err.println("Terminating due to an exception: " + e);
        System.exit(1);
    }
}
From source file:com.sat.spvgt.utils.tims.TIMS.java
/**
 * The main method.
 *
 * @param args
 *            the arguments
 * @throws Exception
 *             the exception
 */
public static void main(String args[]) throws Exception {
    ConfigFileHandlerManager miscConfigFileHandler = new ConfigFileHandlerManager();
    miscConfigFileHandler.loadConfigFileBasedOnPath("src/it/resources");
    new FileOutputStream(fname).close();
    outlog = new PrintWriter(new BufferedWriter(new FileWriter(fname, true)));
    // updatetimsresult("Ttv9629533c", "passed");
    // initializeTIMSParameters(
    //         System.getProperty("tims.userID").trim(),
    //         System.getProperty("tims.projectID").trim(),
    //         System.getProperty("tims.configID").trim(),
    //         System.getProperty("tims.token").trim(),
    //         System.getProperty("tims.sw").trim(),
    //         System.getProperty("tims.platform").trim(),
    //         System.getProperty("tims.browsertype").trim(),
    //         System.getProperty("tims.browserversion").trim()
    // );
    //
    // postTIMSsearch("Ttv9629533c");
    updateHTMLtoTIMSresults();
    // ArrayList<String> re1 = new ArrayList<String>();
    // re1 = readhtmlfilePass();
    // System.out.println(re1.size() + " bvvv " + re1);
    // createtimsresult("Tl4352381c", "failed", "sample111555");
}
From source file:di.uniba.it.tee2.wiki.Wikidump2Text.java
/**
 * @param args the command line arguments
 */
public static void main(String[] args) {
    try {
        CommandLine cmd = cmdParser.parse(options, args);
        if (cmd.hasOption("l") && cmd.hasOption("d") && cmd.hasOption("o")) {
            encoding = cmd.getOptionValue("e", "UTF-8");
            int counter = 0;
            try {
                BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
                        new GZIPOutputStream(new FileOutputStream(cmd.getOptionValue("o"))), "UTF-8"));
                WikipediaDumpIterator it = new WikipediaDumpIterator(new File(cmd.getOptionValue("d")), encoding);
                PageCleaner cleaner = PageCleanerWrapper.getInstance(cmd.getOptionValue("l"));
                while (it.hasNext()) {
                    WikiPage wikiPage = it.next();
                    ParsedPage parsedPage = wikiPage.getParsedPage();
                    if (parsedPage != null) {
                        String title = wikiPage.getTitle();
                        if (!title.matches(notValidTitle)) {
                            if (parsedPage.getText() != null) {
                                writer.append(cleaner.clean(parsedPage.getText()));
                                writer.newLine();
                                writer.newLine();
                                counter++;
                                if (counter % 10000 == 0) {
                                    System.out.println(counter);
                                    writer.flush();
                                }
                            }
                        }
                    }
                }
                writer.flush();
                writer.close();
            } catch (Exception ex) {
                Logger.getLogger(Wikidump2Text.class.getName()).log(Level.SEVERE, null, ex);
            }
            System.out.println("Indexed pages: " + counter);
        } else {
            HelpFormatter helpFormatter = new HelpFormatter();
            helpFormatter.printHelp("Wikipedia dump to text", options, true);
        }
    } catch (ParseException ex) {
        Logger.getLogger(Wikidump2Text.class.getName()).log(Level.SEVERE, null, ex);
    }
}
From source file:com.tmo.swagger.main.GenrateSwaggerJson.java
public static void main(String[] args)
        throws JsonGenerationException, JsonMappingException, IOException, EmptyXlsRows {
    PropertyReader pr = new PropertyReader();
    Properties prop = pr.readPropertiesFile(args[0]);
    //Properties prop = pr.readClassPathPropertyFile("common.properties");
    String swaggerFile = prop.getProperty("swagger.json");
    String sw = "";
    if (swaggerFile != null && swaggerFile.length() > 0) {
        Swagger swagger = populatePropertiesOnlyPaths(prop, new SwaggerParser().read(swaggerFile));
        ObjectMapper mapper = new ObjectMapper();
        mapper.setSerializationInclusion(Include.NON_NULL);
        sw = mapper.writeValueAsString(swagger);
    } else {
        ObjectMapper mapper = new ObjectMapper();
        mapper.setSerializationInclusion(Include.NON_NULL);
        Swagger swagger = populateProperties(prop);
        sw = mapper.writeValueAsString(swagger);
    }
    try {
        File file = new File(args[1] + prop.getProperty("path.operation.tags") + ".json");
        //File file = new File("src/main/resources/" + prop.getProperty("path.operation.tags") + ".json");
        if (!file.exists()) {
            file.createNewFile();
        }
        FileWriter fw = new FileWriter(file.getAbsoluteFile());
        BufferedWriter bw = new BufferedWriter(fw);
        bw.write(sw);
        logger.info("Swagger Generation Done!");
        bw.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
From source file:de.jetwick.snacktory.HtmlFetcher.java
public static void main(String[] args) throws Exception {
    BufferedReader reader = new BufferedReader(new FileReader("urls.txt"));
    String line = null;
    Set<String> existing = new LinkedHashSet<String>();
    while ((line = reader.readLine()) != null) {
        int index1 = line.indexOf("\"");
        int index2 = line.indexOf("\"", index1 + 1);
        String url = line.substring(index1 + 1, index2);
        String domainStr = SHelper.extractDomain(url, true);
        String counterStr = "";
        // TODO more similarities
        if (existing.contains(domainStr))
            counterStr = "2";
        else
            existing.add(domainStr);

        String html = new HtmlFetcher().fetchAsString(url, 20000);
        String outFile = domainStr + counterStr + ".html";
        BufferedWriter writer = new BufferedWriter(new FileWriter(outFile));
        writer.write(html);
        writer.close();
    }
    reader.close();
}
From source file:akori.Impact.java
static public void main(String[] args) throws IOException {
    String PATH = "E:\\Trabajos\\AKORI\\datosmatrizgino\\";
    String PATHIMG = "E:\\NetBeansProjects\\AKORI\\Proccess_1\\ImagesPages\\";
    for (int i = 1; i <= 32; ++i) {
        for (int k = 1; k <= 15; ++k) {
            System.out.println("Matrix " + i + "-" + k);
            BufferedImage img = null;
            try {
                img = ImageIO.read(new File(PATHIMG + i + ".png"));
            } catch (IOException ex) {
                ex.printStackTrace();
            }
            // note: if the image failed to load, the next line throws a NullPointerException
            int ymax = img.getHeight();
            int xmax = img.getWidth();
            double[][] imagen = new double[ymax][xmax];
            BufferedReader in = null;
            try {
                in = new BufferedReader(new FileReader(PATH + i + "-" + k + ".txt"));
            } catch (FileNotFoundException ex) {
                ex.printStackTrace();
            }
            String linea;
            ArrayList<String> lista = new ArrayList<String>();
            try {
                // keep only the gaze points that fall inside the image bounds
                for (int j = 0; (linea = in.readLine()) != null; ++j) {
                    String[] datos = linea.split(",");
                    int x = (int) Double.parseDouble(datos[1]);
                    int y = (int) Double.parseDouble(datos[2]);
                    if (x >= xmax || y >= ymax || x <= 0 || y <= 0) {
                        continue;
                    }
                    lista.add(x + "," + y);
                }
            } catch (Exception ex) {
                ex.printStackTrace();
            }
            try {
                in.close();
            } catch (IOException ex) {
                ex.printStackTrace();
            }
            Iterator<String> iter = lista.iterator();
            int[][] matrix = new int[lista.size()][2];
            for (int j = 0; iter.hasNext(); ++j) {
                String xy = iter.next();
                String[] datos = xy.split(",");
                matrix[j][0] = Integer.parseInt(datos[0]);
                matrix[j][1] = Integer.parseInt(datos[1]);
            }
            for (int j = 0; j < matrix.length; ++j) {
                int std = 50;
                int x = matrix[j][0];
                int y = matrix[j][1];
                imagen[y][x] += 1;
                normalMatrix(imagen, y, x, std);
            }
            FileWriter fw = new FileWriter(PATH + "Matrix" + i + "-" + k + ".txt");
            BufferedWriter bw = new BufferedWriter(fw);
            for (int j = 0; j < imagen.length; ++j) {
                for (int t = 0; t < imagen[j].length; ++t) {
                    if (t + 1 == imagen[j].length)
                        bw.write(imagen[j][t] + "");
                    else
                        bw.write(imagen[j][t] + ",");
                }
                bw.write("\n");
            }
            bw.close();
        }
    }
}
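The normalMatrix helper called above is not included in the snippet; from the call site it appears to spread each gaze point over the matrix with a Gaussian of standard deviation std. A rough, purely illustrative sketch under that assumption:

// Hypothetical sketch of normalMatrix: add a 2D Gaussian bump centered at
// (x, y). The real helper in akori.Impact may normalize or clip differently.
static void normalMatrix(double[][] m, int y, int x, int std) {
    int radius = 3 * std; // contributions beyond 3 sigma are negligible
    double twoSigmaSq = 2.0 * std * std;
    for (int r = Math.max(0, y - radius); r <= Math.min(m.length - 1, y + radius); r++) {
        for (int c = Math.max(0, x - radius); c <= Math.min(m[r].length - 1, x + radius); c++) {
            double d2 = (double) (r - y) * (r - y) + (double) (c - x) * (c - x);
            m[r][c] += Math.exp(-d2 / twoSigmaSq);
        }
    }
}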
From source file:com.termmed.sampling.ConceptsWithMoreThanThreeRoleGroups.java
/**
 * The main method.
 *
 * @param args the arguments
 * @throws Exception the exception
 */
public static void main(String[] args) throws Exception {
    System.out.println("Starting...");
    Map<String, Set<String>> groupsMap = new HashMap<String, Set<String>>();
    File relsFile = new File(
            "/Users/alo/Downloads/SnomedCT_RF2Release_INT_20160131-1/Snapshot/Terminology/sct2_Relationship_Snapshot_INT_20160131.txt");
    BufferedReader br2 = new BufferedReader(new FileReader(relsFile));
    String line2;
    int count2 = 0;
    while ((line2 = br2.readLine()) != null) {
        // process the line
        count2++;
        if (count2 % 10000 == 0) {
            //System.out.println(count2);
        }
        List<String> columns = Arrays.asList(line2.split("\t", -1));
        // the guard must cover index 6 (relationship group), so size >= 7;
        // the original checked >= 6, which does not protect columns.get(6)
        if (columns.size() >= 7) {
            if (columns.get(2).equals("1") && !columns.get(6).equals("0")) {
                if (!groupsMap.containsKey(columns.get(4))) {
                    groupsMap.put(columns.get(4), new HashSet<String>());
                }
                groupsMap.get(columns.get(4)).add(columns.get(6));
            }
        }
    }
    br2.close();
    System.out.println("Relationship groups loaded");
    Gson gson = new Gson();
    System.out.println("Reading JSON 1");
    File crossoverFile1 = new File("/Users/alo/Downloads/crossover_role_to_group.json");
    String contents = FileUtils.readFileToString(crossoverFile1, "utf-8");
    Type collectionType = new TypeToken<Collection<ControlResultLine>>() {
    }.getType();
    List<ControlResultLine> lineObject = gson.fromJson(contents, collectionType);
    Set<String> crossovers1 = new HashSet<String>();
    for (ControlResultLine loopResult : lineObject) {
        crossovers1.add(loopResult.conceptId);
    }
    System.out.println("Crossovers 1 loaded, " + lineObject.size() + " Objects");
    System.out.println("Reading JSON 2");
    File crossoverFile2 = new File("/Users/alo/Downloads/crossover_group_to_group.json");
    String contents2 = FileUtils.readFileToString(crossoverFile2, "utf-8");
    List<ControlResultLine> lineObject2 = gson.fromJson(contents2, collectionType);
    Set<String> crossovers2 = new HashSet<String>();
    for (ControlResultLine loopResult : lineObject2) {
        crossovers2.add(loopResult.conceptId);
    }
    System.out.println("Crossovers 2 loaded, " + lineObject2.size() + " Objects");
    Set<String> foundConcepts = new HashSet<String>();
    BufferedWriter writer = new BufferedWriter(
            new FileWriter(new File("ConceptsWithMoreThanThreeRoleGroups.csv")));
    for (String loopConcept : groupsMap.keySet()) {
        if (groupsMap.get(loopConcept).size() > 3) {
            writer.write(loopConcept);
            writer.newLine();
            foundConcepts.add(loopConcept);
        }
    }
    writer.close();
    System.out.println("Found " + foundConcepts.size() + " concepts");
    int countCrossover1 = 0;
    for (String loopConcept : foundConcepts) {
        if (crossovers1.contains(loopConcept)) {
            countCrossover1++;
        }
    }
    System.out.println(countCrossover1 + " are present in crossover_role_to_group");
    int countCrossover2 = 0;
    for (String loopConcept : foundConcepts) {
        if (crossovers2.contains(loopConcept)) {
            countCrossover2++;
        }
    }
    System.out.println(countCrossover2 + " are present in crossover_group_to_group");
    System.out.println("Done");
}