List of usage examples for java.io BufferedWriter write
public void write(int c) throws IOException
From source file:com.idega.util.Stripper.java
public static void main(String[] args) { // Stripper stripper1 = new Stripper(); if (args.length != 2) { System.err.println("Auli. tt a hafa tvo parametra me essu, innskr og tskr"); return;/*from www. j a v a 2 s . co m*/ } BufferedReader in = null; BufferedWriter out = null; try { in = new BufferedReader(new FileReader(args[0])); } catch (java.io.FileNotFoundException e) { System.err.println("Auli. Error : " + e.toString()); return; } try { out = new BufferedWriter(new FileWriter(args[1])); } catch (java.io.IOException e) { System.err.println("Auli. Error : " + e.toString()); IOUtils.closeQuietly(in); return; } try { String input = in.readLine(); int count = 0; while (input != null) { int index = input.indexOf("\\CVS\\"); if (index > -1) { System.out.println("Skipping : " + input); count++; } else { out.write(input); out.newLine(); } input = in.readLine(); } System.out.println("Skipped : " + count); } catch (java.io.IOException e) { System.err.println("Error reading or writing file : " + e.toString()); } try { in.close(); out.close(); } catch (java.io.IOException e) { System.err.println("Error closing files : " + e.toString()); } }
From source file:main.java.RMDupper.java
public static void main(String[] args) throws IOException { System.err.println("DeDup v" + VERSION); // the command line parameters Options helpOptions = new Options(); helpOptions.addOption("h", "help", false, "show this help page"); Options options = new Options(); options.addOption("h", "help", false, "show this help page"); options.addOption("i", "input", true, "the input file if this option is not specified,\nthe input is expected to be piped in"); options.addOption("o", "output", true, "the output folder. Has to be specified if input is set."); options.addOption("m", "merged", false, "the input only contains merged reads.\n If this option is specified read names are not examined for prefixes.\n Both the start and end of the aligment are considered for all reads."); options.addOption("v", "version", false, "the version of DeDup."); HelpFormatter helpformatter = new HelpFormatter(); CommandLineParser parser = new BasicParser(); try {/* w w w . j a v a 2s.c o m*/ CommandLine cmd = parser.parse(helpOptions, args); if (cmd.hasOption('h')) { helpformatter.printHelp(CLASS_NAME, options); System.exit(0); } } catch (ParseException e1) { } String input = ""; String outputpath = ""; Boolean merged = Boolean.FALSE; try { CommandLine cmd = parser.parse(options, args); if (cmd.hasOption('i')) { input = cmd.getOptionValue('i'); piped = false; } if (cmd.hasOption('o')) { outputpath = cmd.getOptionValue('o'); } if (cmd.hasOption('m')) { merged = Boolean.TRUE; } if (cmd.hasOption('v')) { System.out.println("DeDup v" + VERSION); System.exit(0); } } catch (ParseException e) { helpformatter.printHelp(CLASS_NAME, options); System.err.println(e.getMessage()); System.exit(0); } DecimalFormat df = new DecimalFormat("##.##"); if (piped) { RMDupper rmdup = new RMDupper(System.in, System.out, merged); rmdup.readSAMFile(); System.err.println("We are in piping mode!"); System.err.println("Total reads: " + rmdup.dupStats.total + "\n"); System.err.println("Reverse removed: " + 
rmdup.dupStats.removed_reverse + "\n"); System.err.println("Forward removed: " + rmdup.dupStats.removed_forward + "\n"); System.err.println("Merged removed: " + rmdup.dupStats.removed_merged + "\n"); System.err.println("Total removed: " + (rmdup.dupStats.removed_forward + rmdup.dupStats.removed_merged + rmdup.dupStats.removed_reverse) + "\n"); if (rmdup.dupStats.removed_merged + rmdup.dupStats.removed_forward + rmdup.dupStats.removed_reverse == 0) { System.err.println("Duplication Rate: " + df.format(0.00)); } else { System.err.println("Duplication Rate: " + df.format((double) (rmdup.dupStats.removed_merged + rmdup.dupStats.removed_reverse + rmdup.dupStats.removed_forward) / (double) rmdup.dupStats.total)); } } else { if (outputpath.length() == 0) { System.err.println("The output folder has to be specified"); helpformatter.printHelp(CLASS_NAME, options); System.exit(0); } //Check whether we have a directory as output path, else produce error message and quit! File f = new File(outputpath); if (!f.isDirectory()) { System.err.println("The output folder should be a folder and not a file!"); System.exit(0); } File inputFile = new File(input); File outputFile = new File( outputpath + "/" + Files.getNameWithoutExtension(inputFile.getAbsolutePath()) + "_rmdup.bam"); File outputlog = new File( outputpath + "/" + Files.getNameWithoutExtension(inputFile.getAbsolutePath()) + ".log"); File outputhist = new File( outputpath + "/" + Files.getNameWithoutExtension(inputFile.getAbsolutePath()) + ".hist"); try { FileWriter fw = new FileWriter(outputlog); FileWriter histfw = new FileWriter(outputhist); BufferedWriter bfw = new BufferedWriter(fw); BufferedWriter histbfw = new BufferedWriter(histfw); RMDupper rmdup = new RMDupper(inputFile, outputFile, merged); rmdup.readSAMFile(); rmdup.inputSam.close(); rmdup.outputSam.close(); bfw.write("Total reads: " + rmdup.dupStats.total + "\n"); bfw.write("Reverse removed: " + rmdup.dupStats.removed_reverse + "\n"); bfw.write("Forward removed: 
" + rmdup.dupStats.removed_forward + "\n"); bfw.write("Merged removed: " + rmdup.dupStats.removed_merged + "\n"); bfw.write("Total removed: " + (rmdup.dupStats.removed_forward + rmdup.dupStats.removed_merged + rmdup.dupStats.removed_reverse) + "\n"); bfw.write("Duplication Rate: " + df.format((double) (rmdup.dupStats.removed_merged + rmdup.dupStats.removed_reverse + rmdup.dupStats.removed_forward) / (double) rmdup.dupStats.total)); bfw.flush(); bfw.close(); histbfw.write(rmdup.oc.getHistogram()); histbfw.flush(); histbfw.close(); System.out.println("Total reads: " + rmdup.dupStats.total + "\n"); System.out.println("Unmerged removed: " + (rmdup.dupStats.removed_forward + rmdup.dupStats.removed_reverse) + "\n"); System.out.println("Merged removed: " + rmdup.dupStats.removed_merged + "\n"); System.out.println("Total removed: " + (rmdup.dupStats.removed_forward + rmdup.dupStats.removed_merged + rmdup.dupStats.removed_reverse) + "\n"); if (rmdup.dupStats.removed_merged + rmdup.dupStats.removed_forward + rmdup.dupStats.removed_reverse == 0) { System.out.println("Duplication Rate: " + df.format(0.00)); } else { System.out.println("Duplication Rate: " + df.format((double) (rmdup.dupStats.removed_merged + rmdup.dupStats.removed_reverse + rmdup.dupStats.removed_forward) / (double) rmdup.dupStats.total)); } } catch (IOException e) { e.printStackTrace(); } } }
From source file:ivory.core.tokenize.Tokenizer.java
@SuppressWarnings("static-access")
public static void main(String[] args) {
    // CLI driver: tokenizes each line of --input with the tokenizer selected
    // by --lang/--model/--stem/--stopword options and writes the
    // space-joined tokens to --output. Both files are read/written as UTF-8.
    Options options = new Options();
    options.addOption(OptionBuilder.withArgName("full path to model file or directory").hasArg()
            .withDescription("model file").create("model"));
    options.addOption(OptionBuilder.withArgName("full path to input file").hasArg()
            .withDescription("input file").isRequired().create("input"));
    options.addOption(OptionBuilder.withArgName("full path to output file").hasArg()
            .withDescription("output file").isRequired().create("output"));
    options.addOption(OptionBuilder.withArgName("en | zh | de | fr | ar | tr | es").hasArg()
            .withDescription("2-character language code").isRequired().create("lang"));
    options.addOption(OptionBuilder.withArgName("path to stopwords list").hasArg()
            .withDescription("one stopword per line").create("stopword"));
    options.addOption(OptionBuilder.withArgName("path to stemmed stopwords list").hasArg()
            .withDescription("one stemmed stopword per line").create("stemmed_stopword"));
    options.addOption(OptionBuilder.withArgName("true|false").hasArg().withDescription("turn on/off stemming")
            .create("stem"));
    options.addOption(OptionBuilder.withDescription("Hadoop option to load external jars")
            .withArgName("jar packages").hasArg().create("libjars"));
    CommandLine cmdline;
    CommandLineParser parser = new GnuParser();
    try {
        String stopwordList = null, stemmedStopwordList = null, modelFile = null;
        boolean isStem = true; // stemming defaults to on
        cmdline = parser.parse(options, args);
        if (cmdline.hasOption("stopword")) {
            stopwordList = cmdline.getOptionValue("stopword");
        }
        if (cmdline.hasOption("stemmed_stopword")) {
            stemmedStopwordList = cmdline.getOptionValue("stemmed_stopword");
        }
        if (cmdline.hasOption("stem")) {
            isStem = Boolean.parseBoolean(cmdline.getOptionValue("stem"));
        }
        if (cmdline.hasOption("model")) {
            modelFile = cmdline.getOptionValue("model");
        }
        ivory.core.tokenize.Tokenizer tokenizer = TokenizerFactory.createTokenizer(
                cmdline.getOptionValue("lang"), modelFile, isStem, stopwordList, stemmedStopwordList, null);
        // try-with-resources: the original leaked both streams whenever
        // processContent() threw before the explicit close() calls.
        try (BufferedWriter out = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(cmdline.getOptionValue("output")), "UTF8"));
                BufferedReader in = new BufferedReader(
                        new InputStreamReader(new FileInputStream(cmdline.getOptionValue("input")), "UTF8"))) {
            String line = null;
            while ((line = in.readLine()) != null) {
                String[] tokens = tokenizer.processContent(line);
                // StringBuilder instead of String += in a loop (was O(n^2)
                // in the number of tokens per line).
                StringBuilder s = new StringBuilder();
                for (String token : tokens) {
                    s.append(token).append(' ');
                }
                // trim() drops the trailing separator, exactly as the
                // original "s.trim()" did.
                out.write(s.toString().trim() + "\n");
            }
        }
    } catch (Exception exp) {
        System.out.println(exp);
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("Tokenizer", options);
        System.exit(-1);
    }
}
From source file:com.bright.json.JSonRequestor.java
// NOTE(review): End-to-end client workflow against a Bright Cluster Manager
// head node. Sequence visible in the code below:
//   1. Swing JFileChooser picks an input directory, which is zipped locally.
//   2. A JOptionPane form collects host/user/password/workdir/study name.
//   3. The zip is copied to the head node via ScpTo, then a Slurm job request
//      is built (jobSubmit/jobObject), serialized with Gson, and POSTed to
//      https://<host>:8081/json after cookie-based login (doLogin).
//   4. The job id is scanned out of the response; the code then polls getJob
//      while the status is RUNNING/COMPLETING, incrementally reading the
//      remote .sum monitoring file via cmReadFile (byte offset fileByteIdx,
//      advanced by countLines over "\\n" escapes) and appending the decoded
//      text to 'output'.
//   5. On COMPLETED, results are copied back with ScpFrom; either way a
//      status dialog is shown, the accumulated monitoring text is written to
//      a local .sum.txt file (rmDuplicateLines post-processes it), the
//      session is logged out, and the JVM exits.
// The code is kept byte-identical below: its behavior depends on this exact
// statement order (login before submit, poll/read interleaving, dialogs), so
// a restyle was judged unsafe. Known scrape damage to be aware of when
// restoring this file: the string literal "JSON Request No. 4 " is split
// across a line break below — TODO confirm against the original source.
// NOTE(review): the password is held in plain String fields and the Scanner
// 'resInt' assumes the response contains a numeric job id — verify upstream.
public static void main(String[] args) { String fileBasename = null;// w w w . j a v a 2 s . c om String[] zipArgs = null; JFileChooser chooser = new JFileChooser("/Users/panos/STR_GRID"); try { chooser.setCurrentDirectory(new java.io.File(".")); chooser.setDialogTitle("Select the input directory"); chooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY); chooser.setAcceptAllFileFilterUsed(false); if (chooser.showOpenDialog(null) == JFileChooser.APPROVE_OPTION) { System.out.println("getCurrentDirectory(): " + chooser.getCurrentDirectory()); System.out.println("getSelectedFile() : " + chooser.getSelectedFile()); // String fileBasename = // chooser.getSelectedFile().toString().substring(chooser.getSelectedFile().toString().lastIndexOf(File.separator)+1,chooser.getSelectedFile().toString().lastIndexOf(".")); fileBasename = chooser.getSelectedFile().toString() .substring(chooser.getSelectedFile().toString().lastIndexOf(File.separator) + 1); System.out.println("Base name: " + fileBasename); zipArgs = new String[] { chooser.getSelectedFile().toString(), chooser.getCurrentDirectory().toString() + File.separator + fileBasename + ".zip" }; com.bright.utils.ZipFile.main(zipArgs); } else { System.out.println("No Selection "); } } catch (Exception e) { System.out.println(e.toString()); } JTextField uiHost = new JTextField("ucs-head.brightcomputing.com"); // TextPrompt puiHost = new // TextPrompt("hadoop.brightcomputing.com",uiHost); JTextField uiUser = new JTextField("nexus"); // TextPrompt puiUser = new TextPrompt("nexus", uiUser); JTextField uiPass = new JPasswordField("system"); // TextPrompt puiPass = new TextPrompt("", uiPass); JTextField uiWdir = new JTextField("/home/nexus/pp1234"); // TextPrompt puiWdir = new TextPrompt("/home/nexus/nexus_workdir", // uiWdir); JTextField uiOut = new JTextField("foo"); // TextPrompt puiOut = new TextPrompt("foobar123", uiOut); JPanel myPanel = new JPanel(new GridLayout(5, 1)); myPanel.add(new JLabel("Bright HeadNode hostname:")); 
myPanel.add(uiHost); // myPanel.add(Box.createHorizontalStrut(1)); // a spacer myPanel.add(new JLabel("Username:")); myPanel.add(uiUser); myPanel.add(new JLabel("Password:")); myPanel.add(uiPass); myPanel.add(new JLabel("Working Directory:")); myPanel.add(uiWdir); // myPanel.add(Box.createHorizontalStrut(1)); // a spacer myPanel.add(new JLabel("Output Study Name ( -s ):")); myPanel.add(uiOut); int result = JOptionPane.showConfirmDialog(null, myPanel, "Please fill in all the fields.", JOptionPane.OK_CANCEL_OPTION); if (result == JOptionPane.OK_OPTION) { System.out.println("Input received."); } String rfile = uiWdir.getText(); String rhost = uiHost.getText(); String ruser = uiUser.getText(); String rpass = uiPass.getText(); String nexusOut = uiOut.getText(); String[] myarg = new String[] { zipArgs[1], ruser + "@" + rhost + ":" + rfile, nexusOut, fileBasename }; com.bright.utils.ScpTo.main(myarg); String cmURL = "https://" + rhost + ":8081/json"; List<Cookie> cookies = doLogin(ruser, rpass, cmURL); chkVersion(cmURL, cookies); jobSubmit myjob = new jobSubmit(); jobSubmit.jobObject myjobObj = new jobSubmit.jobObject(); myjob.setService("cmjob"); myjob.setCall("submitJob"); myjobObj.setQueue("defq"); myjobObj.setJobname("myNexusJob"); myjobObj.setAccount(ruser); myjobObj.setRundirectory(rfile); myjobObj.setUsername(ruser); myjobObj.setGroupname("cmsupport"); myjobObj.setPriority("1"); myjobObj.setStdinfile(rfile + "/stdin-mpi"); myjobObj.setStdoutfile(rfile + "/stdout-mpi"); myjobObj.setStderrfile(rfile + "/stderr-mpi"); myjobObj.setResourceList(Arrays.asList("")); myjobObj.setDependencies(Arrays.asList("")); myjobObj.setMailNotify(false); myjobObj.setMailOptions("ALL"); myjobObj.setMaxWallClock("00:10:00"); myjobObj.setNumberOfProcesses(1); myjobObj.setNumberOfNodes(1); myjobObj.setNodes(Arrays.asList("")); myjobObj.setCommandLineInterpreter("/bin/bash"); myjobObj.setUserdefined(Arrays.asList("cd " + rfile, "date", "pwd")); myjobObj.setExecutable("mpirun"); 
myjobObj.setArguments("-env I_MPI_FABRICS shm:tcp " + Constants.NEXUSSIM_EXEC + " -mpi -c " + rfile + "/" + fileBasename + "/" + fileBasename + " -s " + rfile + "/" + fileBasename + "/" + nexusOut); myjobObj.setModules(Arrays.asList("shared", "nexus", "intel-mpi/64")); myjobObj.setDebug(false); myjobObj.setBaseType("Job"); myjobObj.setIsSlurm(true); myjobObj.setUniqueKey(0); myjobObj.setModified(false); myjobObj.setToBeRemoved(false); myjobObj.setChildType("SlurmJob"); myjobObj.setJobID("Nexus test"); // Map<String,jobSubmit.jobObject > mymap= new HashMap<String, // jobSubmit.jobObject>(); // mymap.put("Slurm",myjobObj); ArrayList<Object> mylist = new ArrayList<Object>(); mylist.add("slurm"); mylist.add(myjobObj); myjob.setArgs(mylist); GsonBuilder builder = new GsonBuilder(); builder.enableComplexMapKeySerialization(); // Gson g = new Gson(); Gson g = builder.create(); String json2 = g.toJson(myjob); // To be used from a real console and not Eclipse Delete.main(zipArgs[1]); String message = JSonRequestor.doRequest(json2, cmURL, cookies); @SuppressWarnings("resource") Scanner resInt = new Scanner(message).useDelimiter("[^0-9]+"); int jobID = resInt.nextInt(); System.out.println("Job ID: " + jobID); JOptionPane optionPane = new JOptionPane(message); JDialog myDialog = optionPane.createDialog(null, "CMDaemon response: "); myDialog.setModal(false); myDialog.setVisible(true); ArrayList<Object> mylist2 = new ArrayList<Object>(); mylist2.add("slurm"); String JobID = Integer.toString(jobID); mylist2.add(JobID); myjob.setArgs(mylist2); myjob.setService("cmjob"); myjob.setCall("getJob"); String json3 = g.toJson(myjob); System.out.println("JSON Request No. 
4 " + json3); cmReadFile readfile = new cmReadFile(); readfile.setService("cmmain"); readfile.setCall("readFile"); readfile.setUserName(ruser); int fileByteIdx = 1; readfile.setPath(rfile + "/" + fileBasename + "/" + fileBasename + ".sum@+" + fileByteIdx); String json4 = g.toJson(readfile); String monFile = JSonRequestor.doRequest(json4, cmURL, cookies).replaceAll("^\"|\"$", ""); if (monFile.startsWith("Unable")) { monFile = ""; } else { fileByteIdx += countLines(monFile, "\\\\n"); System.out.println(""); } StringBuffer output = new StringBuffer(); // Get the correct Line Separator for the OS (CRLF or LF) String nl = System.getProperty("line.separator"); String filename = chooser.getCurrentDirectory().toString() + File.separator + fileBasename + ".sum.txt"; System.out.println("Local monitoring file: " + filename); output.append(monFile.replaceAll("\\\\n", System.getProperty("line.separator"))); String getJobJSON = JSonRequestor.doRequest(json3, cmURL, cookies); jobGet getJobObj = new Gson().fromJson(getJobJSON, jobGet.class); System.out.println("Job " + jobID + " status: " + getJobObj.getStatus().toString()); while (getJobObj.getStatus().toString().equals("RUNNING") || getJobObj.getStatus().toString().equals("COMPLETING")) { try { getJobJSON = JSonRequestor.doRequest(json3, cmURL, cookies); getJobObj = new Gson().fromJson(getJobJSON, jobGet.class); System.out.println("Job " + jobID + " status: " + getJobObj.getStatus().toString()); readfile.setPath(rfile + "/" + fileBasename + "/" + fileBasename + ".sum@+" + fileByteIdx); json4 = g.toJson(readfile); monFile = JSonRequestor.doRequest(json4, cmURL, cookies).replaceAll("^\"|\"$", ""); if (monFile.startsWith("Unable")) { monFile = ""; } else { output.append(monFile.replaceAll("\\\\n", System.getProperty("line.separator"))); System.out.println("FILE INDEX:" + fileByteIdx); fileByteIdx += countLines(monFile, "\\\\n"); } Thread.sleep(Constants.STATUS_CHECK_INTERVAL); } catch (InterruptedException ex) { 
Thread.currentThread().interrupt(); } } Gson gson_nice = new GsonBuilder().setPrettyPrinting().create(); String json_out = gson_nice.toJson(getJobJSON); System.out.println(json_out); System.out.println("JSON Request No. 5 " + json4); readfile.setPath(rfile + "/" + fileBasename + "/" + fileBasename + ".sum@+" + fileByteIdx); json4 = g.toJson(readfile); monFile = JSonRequestor.doRequest(json4, cmURL, cookies).replaceAll("^\"|\"$", ""); if (monFile.startsWith("Unable")) { monFile = ""; } else { output.append(monFile.replaceAll("\\\\n", System.getProperty("line.separator"))); fileByteIdx += countLines(monFile, "\\\\n"); } System.out.println("FILE INDEX:" + fileByteIdx); /* * System.out.print("Monitoring file: " + monFile.replaceAll("\\n", * System.getProperty("line.separator"))); try { * FileUtils.writeStringToFile( new * File(chooser.getCurrentDirectory().toString() + File.separator + * fileBasename + ".sum.txt"), monFile.replaceAll("\\n", * System.getProperty("line.separator"))); } catch (IOException e) { * * e.printStackTrace(); } */ if (getJobObj.getStatus().toString().equals("COMPLETED")) { String[] zipArgs_from = new String[] { chooser.getSelectedFile().toString(), chooser.getCurrentDirectory().toString() + File.separator + fileBasename + "_out.zip" }; String[] myarg_from = new String[] { ruser + "@" + rhost + ":" + rfile + "/" + fileBasename + "_out.zip", zipArgs_from[1], rfile, fileBasename }; com.bright.utils.ScpFrom.main(myarg_from); JOptionPane optionPaneS = new JOptionPane("Job execution completed without errors!"); JDialog myDialogS = optionPaneS.createDialog(null, "Job status: "); myDialogS.setModal(false); myDialogS.setVisible(true); } else { JOptionPane optionPaneF = new JOptionPane("Job execution FAILED!"); JDialog myDialogF = optionPaneF.createDialog(null, "Job status: "); myDialogF.setModal(false); myDialogF.setVisible(true); } try { System.out.println("Local monitoring file: " + filename); BufferedWriter out = new BufferedWriter(new 
FileWriter(filename)); String outText = output.toString(); String newString = outText.replace("\\\\n", nl); System.out.println("Text: " + outText); out.write(newString); out.close(); rmDuplicateLines.main(filename); } catch (IOException e) { e.printStackTrace(); } doLogout(cmURL, cookies); System.exit(0); }
From source file:json_to_xml_1.java
public static void main(String args[]) { System.out.print("json_to_xml_1 workflow Copyright (C) 2016 Stephan Kreutzer\n" + "This program comes with ABSOLUTELY NO WARRANTY.\n" + "This is free software, and you are welcome to redistribute it\n" + "under certain conditions. See the GNU Affero General Public License 3\n" + "or any later version for details. Also, see the source code repository\n" + "https://github.com/publishing-systems/digital_publishing_workflow_tools/ and\n" + "the project website http://www.publishing-systems.org.\n\n"); json_to_xml_1 converter = json_to_xml_1.getInstance(); converter.getInfoMessages().clear(); try {//from w ww . j ava 2 s . c om converter.execute(args); } catch (ProgramTerminationException ex) { converter.handleTermination(ex); } if (converter.resultInfoFile != null) { try { BufferedWriter writer = new BufferedWriter( new OutputStreamWriter(new FileOutputStream(converter.resultInfoFile), "UTF-8")); writer.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); writer.write( "<!-- This file was created by json_to_xml_1, which is free software licensed under the GNU Affero General Public License 3 or any later version (see https://github.com/publishing-systems/digital_publishing_workflow_tools/ and http://www.publishing-systems.org). 
-->\n"); writer.write("<json-to-xml-1-result-information>\n"); if (converter.getInfoMessages().size() <= 0) { writer.write(" <success/>\n"); } else { writer.write(" <success>\n"); writer.write(" <info-messages>\n"); for (int i = 0, max = converter.getInfoMessages().size(); i < max; i++) { InfoMessage infoMessage = converter.getInfoMessages().get(i); writer.write(" <info-message number=\"" + i + "\">\n"); writer.write(" <timestamp>" + infoMessage.getTimestamp() + "</timestamp>\n"); String infoMessageText = infoMessage.getMessage(); String infoMessageId = infoMessage.getId(); String infoMessageBundle = infoMessage.getBundle(); Object[] infoMessageArguments = infoMessage.getArguments(); if (infoMessageBundle != null) { // Ampersand needs to be the first, otherwise it would double-encode // other entities. infoMessageBundle = infoMessageBundle.replaceAll("&", "&"); infoMessageBundle = infoMessageBundle.replaceAll("<", "<"); infoMessageBundle = infoMessageBundle.replaceAll(">", ">"); writer.write(" <id-bundle>" + infoMessageBundle + "</id-bundle>\n"); } if (infoMessageId != null) { // Ampersand needs to be the first, otherwise it would double-encode // other entities. infoMessageId = infoMessageId.replaceAll("&", "&"); infoMessageId = infoMessageId.replaceAll("<", "<"); infoMessageId = infoMessageId.replaceAll(">", ">"); writer.write(" <id>" + infoMessageId + "</id>\n"); } if (infoMessageText != null) { // Ampersand needs to be the first, otherwise it would double-encode // other entities. 
infoMessageText = infoMessageText.replaceAll("&", "&"); infoMessageText = infoMessageText.replaceAll("<", "<"); infoMessageText = infoMessageText.replaceAll(">", ">"); writer.write(" <message>" + infoMessageText + "</message>\n"); } if (infoMessageArguments != null) { writer.write(" <arguments>\n"); int argumentCount = infoMessageArguments.length; for (int j = 0; j < argumentCount; j++) { if (infoMessageArguments[j] == null) { writer.write(" <argument number=\"" + j + "\">\n"); writer.write(" <class></class>\n"); writer.write(" <value>null</value>\n"); writer.write(" </argument>\n"); continue; } String className = infoMessageArguments[j].getClass().getName(); // Ampersand needs to be the first, otherwise it would double-encode // other entities. className = className.replaceAll("&", "&"); className = className.replaceAll("<", "<"); className = className.replaceAll(">", ">"); String value = infoMessageArguments[j].toString(); // Ampersand needs to be the first, otherwise it would double-encode // other entities. value = value.replaceAll("&", "&"); value = value.replaceAll("<", "<"); value = value.replaceAll(">", ">"); writer.write(" <argument number=\"" + j + "\">\n"); writer.write(" <class>" + className + "</class>\n"); writer.write(" <value>" + value + "</value>\n"); writer.write(" </argument>\n"); } writer.write(" </arguments>\n"); } Exception exception = infoMessage.getException(); if (exception != null) { writer.write(" <exception>\n"); String className = exception.getClass().getName(); // Ampersand needs to be the first, otherwise it would double-encode // other entities. 
className = className.replaceAll("&", "&"); className = className.replaceAll("<", "<"); className = className.replaceAll(">", ">"); writer.write(" <class>" + className + "</class>\n"); StringWriter stringWriter = new StringWriter(); PrintWriter printWriter = new PrintWriter(stringWriter); exception.printStackTrace(printWriter); String stackTrace = stringWriter.toString(); // Ampersand needs to be the first, otherwise it would double-encode // other entities. stackTrace = stackTrace.replaceAll("&", "&"); stackTrace = stackTrace.replaceAll("<", "<"); stackTrace = stackTrace.replaceAll(">", ">"); writer.write(" <stack-trace>" + stackTrace + "</stack-trace>\n"); writer.write(" </exception>\n"); } writer.write(" </info-message>\n"); } writer.write(" </info-messages>\n"); writer.write(" </success>\n"); } writer.write("</json-to-xml-1-result-information>\n"); writer.flush(); writer.close(); } catch (FileNotFoundException ex) { ex.printStackTrace(); System.exit(-1); } catch (UnsupportedEncodingException ex) { ex.printStackTrace(); System.exit(-1); } catch (IOException ex) { ex.printStackTrace(); System.exit(-1); } } converter.getInfoMessages().clear(); converter.resultInfoFile = null; }
From source file:apps.ParsedPost.java
public static void main(String args[]) { Options options = new Options(); options.addOption(INPUT_PARAM, null, true, INPUT_DESC); options.addOption(OUTPUT_PARAM, null, true, OUTPUT_DESC); options.addOption(MAX_NUM_REC_PARAM, null, true, MAX_NUM_REC_DESC); options.addOption(DEBUG_PRINT_PARAM, null, false, DEBUG_PRINT_DESC); options.addOption(EXCLUDE_CODE_PARAM, null, false, EXCLUDE_CODE_DESC); CommandLineParser parser = new org.apache.commons.cli.GnuParser(); HashMap<String, ParsedPost> hQuestions = new HashMap<String, ParsedPost>(); try {/* w w w. j ava2 s . c om*/ CommandLine cmd = parser.parse(options, args); String inputFile = cmd.getOptionValue(INPUT_PARAM); if (null == inputFile) Usage("Specify: " + INPUT_PARAM, options); String outputFile = cmd.getOptionValue(OUTPUT_PARAM); if (null == outputFile) Usage("Specify: " + OUTPUT_PARAM, options); InputStream input = CompressUtils.createInputStream(inputFile); BufferedWriter output = new BufferedWriter(new FileWriter(new File(outputFile))); int maxNumRec = Integer.MAX_VALUE; String tmp = cmd.getOptionValue(MAX_NUM_REC_PARAM); if (tmp != null) maxNumRec = Integer.parseInt(tmp); boolean debug = cmd.hasOption(DEBUG_PRINT_PARAM); boolean excludeCode = cmd.hasOption(EXCLUDE_CODE_PARAM); System.out.println("Processing at most " + maxNumRec + " records, excluding code? 
" + excludeCode); XmlIterator xi = new XmlIterator(input, ROOT_POST_TAG); String elem; output.write("<?xml version='1.0' encoding='UTF-8'?><ystfeed>\n"); for (int num = 1; num <= maxNumRec && !(elem = xi.readNext()).isEmpty(); ++num) { ParsedPost post = null; try { post = parsePost(elem, excludeCode); if (!post.mAcceptedAnswerId.isEmpty()) { hQuestions.put(post.mId, post); } else if (post.mpostIdType.equals("2")) { String parentId = post.mParentId; String id = post.mId; if (!parentId.isEmpty()) { ParsedPost parentPost = hQuestions.get(parentId); if (parentPost != null && parentPost.mAcceptedAnswerId.equals(id)) { output.write(createYahooAnswersQuestion(parentPost, post)); hQuestions.remove(parentId); } } } } catch (Exception e) { e.printStackTrace(); throw new Exception("Error parsing record # " + num + ", error message: " + e); } if (debug) { System.out.println(String.format("%s parentId=%s acceptedAnswerId=%s type=%s", post.mId, post.mParentId, post.mAcceptedAnswerId, post.mpostIdType)); System.out.println("================================"); if (!post.mTitle.isEmpty()) { System.out.println(post.mTitle); System.out.println("--------------------------------"); } System.out.println(post.mBody); System.out.println("================================"); } } output.write("</ystfeed>\n"); input.close(); output.close(); } catch (ParseException e) { Usage("Cannot parse arguments", options); } catch (Exception e) { e.printStackTrace(); System.err.println("Terminating due to an exception: " + e); System.exit(1); } }
From source file:edu.cmu.lti.oaqa.knn4qa.apps.ParsedPost.java
public static void main(String args[]) { Options options = new Options(); options.addOption(INPUT_PARAM, null, true, INPUT_DESC); options.addOption(OUTPUT_PARAM, null, true, OUTPUT_DESC); options.addOption(CommonParams.MAX_NUM_REC_PARAM, null, true, CommonParams.MAX_NUM_REC_DESC); options.addOption(DEBUG_PRINT_PARAM, null, false, DEBUG_PRINT_DESC); options.addOption(EXCLUDE_CODE_PARAM, null, false, EXCLUDE_CODE_DESC); CommandLineParser parser = new org.apache.commons.cli.GnuParser(); HashMap<String, ParsedPost> hQuestions = new HashMap<String, ParsedPost>(); try {// ww w. j a v a 2s.co m CommandLine cmd = parser.parse(options, args); String inputFile = cmd.getOptionValue(INPUT_PARAM); if (null == inputFile) Usage("Specify: " + INPUT_PARAM, options); String outputFile = cmd.getOptionValue(OUTPUT_PARAM); if (null == outputFile) Usage("Specify: " + OUTPUT_PARAM, options); InputStream input = CompressUtils.createInputStream(inputFile); BufferedWriter output = new BufferedWriter(new FileWriter(new File(outputFile))); int maxNumRec = Integer.MAX_VALUE; String tmp = cmd.getOptionValue(CommonParams.MAX_NUM_REC_PARAM); if (tmp != null) maxNumRec = Integer.parseInt(tmp); boolean debug = cmd.hasOption(DEBUG_PRINT_PARAM); boolean excludeCode = cmd.hasOption(EXCLUDE_CODE_PARAM); System.out.println("Processing at most " + maxNumRec + " records, excluding code? 
" + excludeCode); XmlIterator xi = new XmlIterator(input, ROOT_POST_TAG); String elem; output.write("<?xml version='1.0' encoding='UTF-8'?><ystfeed>\n"); for (int num = 1; num <= maxNumRec && !(elem = xi.readNext()).isEmpty(); ++num) { ParsedPost post = null; try { post = parsePost(elem, excludeCode); if (!post.mAcceptedAnswerId.isEmpty()) { hQuestions.put(post.mId, post); } else if (post.mpostIdType.equals("2")) { String parentId = post.mParentId; String id = post.mId; if (!parentId.isEmpty()) { ParsedPost parentPost = hQuestions.get(parentId); if (parentPost != null && parentPost.mAcceptedAnswerId.equals(id)) { output.write(createYahooAnswersQuestion(parentPost, post)); hQuestions.remove(parentId); } } } } catch (Exception e) { e.printStackTrace(); throw new Exception("Error parsing record # " + num + ", error message: " + e); } if (debug) { System.out.println(String.format("%s parentId=%s acceptedAnswerId=%s type=%s", post.mId, post.mParentId, post.mAcceptedAnswerId, post.mpostIdType)); System.out.println("================================"); if (!post.mTitle.isEmpty()) { System.out.println(post.mTitle); System.out.println("--------------------------------"); } System.out.println(post.mBody); System.out.println("================================"); } } output.write("</ystfeed>\n"); input.close(); output.close(); } catch (ParseException e) { Usage("Cannot parse arguments", options); } catch (Exception e) { e.printStackTrace(); System.err.println("Terminating due to an exception: " + e); System.exit(1); } }
From source file:main.Driver.java
/** * The path to a properties file which will supply parameter values for the tests should be passed in as argument 0 to main. * The test that will be run is determined by the value of 'test_type' in the properties file, and each of the tests have their own properties: * 'encode+decode' - Encode and decode the given leadsheet with the autoencoder, writing the result to a leadsheet file. * Params: /*from w w w . j a v a 2 s . co m*/ * * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with} * * name_generator_connectome={the path to the connectome which the name generator will be loaded with} * * input_leadsheet={the path to the leadsheet file which will be encoded and decoded} * * output_folder={the path to the output folder which the result leadsheet file will be written in} * * 'encode+write_queue' - Encode the given leadsheet with the autoencoder, then write the encoded feature queue to a queue file. * Params: * * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with} * * input_leadsheet={the path to the leadsheet file which will be encoded} * * queue_folder={the path to the output folder which the result queue file will be written in} * * 'encode+write_queue+decode' - Encode the given leadsheet with the autoencoder, write the encoded feature queue to a queue file, and then write the result leadsheet to a leadsheet file. 
* * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with} * * name_generator_connectome={the path to the connectome which the name generator will be loaded with} * * input_leadsheet={the path to the leadsheet file which will be encoded and decoded} * * queue_folder={the path to the output folder which the result queue file will be written in} * * output_folder={the path to the output folder which the result leadsheet file will be written in} * 'create_feature_property_vector' - Given a corpus folder of leadsheets, construct a vector consisting of property analysis values for each feature in the corpus data * * input_corpus_folder={the path to the corpus folder containing all leadsheets to analyze} * * feature_size={the size (in time steps) of each feature} * * feature_properties_path={the path to write the generated vector file to (the file will be a csv file containing all the values in left-to-right order} * * feature_property={the type of feature property to analyze - current options are 'rest', 'sustain', articulate' (these return ratios of time steps with the given property to the total time steps in the feature). 
* 'compile_feature_queue_matrix' - Given a corpus folder of feature queues, construct a matrix of all feature vectors and write it as a csv file * * queue_folder={the path to the folder containing all queue files to compile} * * feature_matrix_path={the path to write the result csv file to} * 'generate_from_feature_queue_matrix' - Given a matrix of feature vectors, load the autoencoder with a queue of those features and decode from it, writing the result leadsheet to a file * * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with} * * reference_leadsheet={the path to the leadsheet we will take the chord sequence from (and loop it to match the length of the feature queue)} * * feature_queue_matrix_path={the path to the feature queue matrix file we will decode from} * * output_file_path={the path to the file we will write our result leadsheet to} * * (optional) song_title={the song title to write in the leadsheet file - by default this is "Generation from Feature Matrix {path of the feature matrix}"} * * feature_size={the size (in time steps) of features} * 'population_trade' - Given a leadsheet file, split it into sections of a specified size, and between sections, generate a response that plays off of a population of previously encoded feature queues * * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with} * * input_leadsheet={the path to the leadsheet file which will be encoded and traded with} * * output_folder={the path to the output folder which the result leadsheet file will be written in} * * trading_part_size={the size (in time steps) of each trading part. 
The input leadsheet will be split into sections of this size, and trading responses will be generated in between.} * * interpolation_variance={a random value between zero and this will be added to the interpolation_min at each trading section to calculate the interpolation of the recently encoded queue towards the queue population before decoding the trading response} * * interpolation_min={the minimum ratio of interpolation at each trading section} * * herding_strength={the maximum strength of the herding operation at each section (all queues in the population are interpolated a random amount towards the most recent queue)} * * mutation_strength={the maximum strength of mutation at each section (each element of the feature vectors of all queues in the population are mutated at a random strength} * * crossover_strength{the maximum strength of crossover at each section (there is a chance for every queue that the queue will swap a random feature of itself with the corresponding feature of another random queue)} * 'interpolation' - Given a leadsheet file and a reference queue file, encode the leadsheet file with the autoencoder, and generate from the encoded queue for a number of divisions of a full interpolation towards the target queue * * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with} * * input_leadsheet={the path to the leadsheet file which will be encoded and interpolated} * * target_queue={the path to the queue to interpolate towards at each interpolation value}; * * output_folder={the path to the output folder which the result leadsheet file will be written in} * * num_interpolation_divisions={the number of divisions of the interpolation strength from 0.0 to 1.0 (the length of the result leadsheet will be equal to the length of the original times 1 + number of divisions, as the first section of the result leadsheet is for interpolation 0.0)} * 'frankenstein' - Given a primary queue, a reference leadsheet for 
chords, and a corpus of queue files, construct the result leadsheet from a series of randomly weighted interpolations of the primary queue towards the set of selected queues. * * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with} * * primary_queue_path={the path to the queue which will serve as the base for all of the queue combinations (which are the result of sequential interpolations instead of a weighted sum)} * * reference_leadsheet={the path to the leadsheet we will take the chord sequence from (and loop it to match the desired length of our output} * * queue_folder={the path to the folder containing all queue files we can select from} * * output_file_path={the path to the file we will write our result leadsheet to} * * num_reference_queues={the number of reference queues we will pick at random from the queue folder to sample from) * * num_combinations={the number of queue combinations to sample and create the result leadsheet from} * * interpolation_strength={the total magnitude of all interpolation operations for each combination} */ public static void main(String[] args) throws FileNotFoundException, IOException, ConfigurationException { FileBasedConfigurationBuilder<PropertiesConfiguration> builder = new FileBasedConfigurationBuilder<>( PropertiesConfiguration.class).configure( new Parameters().properties().setFileName(args[0]).setThrowExceptionOnMissing(true) .setListDelimiterHandler(new DefaultListDelimiterHandler(';')) .setIncludesAllowed(false)); Configuration config = builder.getConfiguration(); LogTimer.initStartTime(); //start our logging timer to keep track of our execution time //switch statement to run the appropriate test switch (config.getString("test_type")) { case "encode+decode": { //load parameter values from config file String autoencoderConnectomePath = config.getString("autoencoder_connectome"); String nameGeneratorConnectomePath = config.getString("name_generator_connectome"); String 
inputLeadsheetPath = config.getString("input_leadsheet"); String outputFolderPath = config.getString("output_folder"); //initialize networks NameGenerator nameGenerator = initializeNameGenerator(nameGeneratorConnectomePath); ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false); //initialize input sequences and output sequence LeadsheetDataSequence inputSequence = leadsheetToSequence(inputLeadsheetPath); LeadsheetDataSequence outputSequence = inputSequence.copy(); outputSequence.clearMelody(); LeadsheetDataSequence decoderInputSequence = outputSequence.copy(); //encode and decode encodeFromSequence(autoencoder, inputSequence); decodeToSequence(autoencoder, outputSequence, decoderInputSequence); //generate song title String songTitle = nameGenerator.generateName(); //write output to specified directory with same file name + _aeOutput suffix writeLeadsheetFile(outputSequence, outputFolderPath, new File(inputLeadsheetPath).getName(), "_aeOutput", songTitle); } break; case "encode+write_queue": { //load parameter values from config file String autoencoderConnectomePath = config.getString("autoencoder_connectome"); String inputLeadsheetPath = config.getString("input_leadsheet"); String queueFolderPath = config.getString("queue_folder"); //initialize network ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false); //initialize input sequence LeadsheetDataSequence inputSequence = leadsheetToSequence(inputLeadsheetPath); //encode encodeFromSequence(autoencoder, inputSequence); //write to a queue file in the specified queue folder (the write method will handle removing/adding extensions writeQueueFile(autoencoder, queueFolderPath, new File(inputLeadsheetPath).getName()); } break; case "encode+write_queue+decode": { //load parameter values from config file String autoencoderConnectomePath = config.getString("autoencoder_connectome"); String nameGeneratorConnectomePath = 
config.getString("name_generator_connectome"); String inputLeadsheetPath = config.getString("input_leadsheet"); String queueFolderPath = config.getString("queue_folder"); String outputFolderPath = config.getString("output_folder"); //initialize networks NameGenerator nameGenerator = initializeNameGenerator(nameGeneratorConnectomePath); ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false); //initialize input sequences and output sequence LeadsheetDataSequence inputSequence = leadsheetToSequence(inputLeadsheetPath); LeadsheetDataSequence outputSequence = inputSequence.copy(); outputSequence.clearMelody(); LeadsheetDataSequence decoderInputSequence = outputSequence.copy(); //encode encodeFromSequence(autoencoder, inputSequence); //write to a queue file in the specified queue folder (the write method will handle removing/adding extensions writeQueueFile(autoencoder, queueFolderPath, new File(inputLeadsheetPath).getName()); //decode decodeToSequence(autoencoder, outputSequence, decoderInputSequence); //generate song title String songTitle = nameGenerator.generateName(); //write output to specified directory with same file name + _aeOutput suffix writeLeadsheetFile(outputSequence, outputFolderPath, new File(inputLeadsheetPath).getName(), "_aeOutput", songTitle); } break; case "create_feature_property_vector": { //load parameter values from config file String inputCorpusFolder = config.getString("input_corpus_folder"); int featureSize = config.getInt("feature_size"); String featurePropertiesPath = config.getString("feature_properties_path"); String featureProperty = config.getString("feature_property"); //compile array of valid leadsheet files File[] songFiles = new File(inputCorpusFolder) .listFiles((File dir, String name) -> name.endsWith(".ls")); //construct feature property vector from analyzed feature property values of all songs AVector featurePropertyValues = Vector.createLength(0); int featureIndex = 0; for (File 
inputFile : songFiles) { LeadsheetDataSequence melodySequence = leadsheetToSequence(inputFile.getPath()); featurePropertyValues.join(melodyFeatureAnalysis(melodySequence, featureProperty, featureSize)); } //write generated feature_properties BufferedWriter writer = new BufferedWriter( new FileWriter(featurePropertiesPath + "_" + featureProperty + ".v")); writer.write(ReadWriteUtilities.getNumpyCSVString(featurePropertyValues)); writer.close(); } break; case "compile_feature_queue_matrix": { //load parameter values from config file String queueFolderPath = config.getString("queue_folder"); String featureMatrixPath = config.getString("feature_matrix_path"); //generate feature matrix from all feature queues in specified queue folder File[] queueFiles = new File(queueFolderPath).listFiles((File dir, String name) -> name.endsWith(".q")); AMatrix totalFeatureMatrix = generateFeatureQueueMatrix(queueFiles); String writeData = ReadWriteUtilities.getNumpyCSVString(totalFeatureMatrix); BufferedWriter writer = new BufferedWriter(new FileWriter(featureMatrixPath)); writer.write(writeData); writer.close(); } break; case "generate_from_feature_queue_matrix": { //load parameter values from config file String autoencoderConnectomePath = config.getString("autoencoder_connectome"); String referenceLeadsheetPath = config.getString("reference_leadsheet"); String featureQueueMatrixPath = config.getString("feature_queue_matrix_path"); String outputFilePath = config.getString("output_file_path"); String songTitle = config.getString("song_title", "Generation from Feature Matrix " + featureQueueMatrixPath); int featureSize = config.getInt("feature_size"); //initialize network ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false); //initialize chord sequence LeadsheetDataSequence chordSequence = leadsheetToSequence(referenceLeadsheetPath); chordSequence.clearMelody(); //call generation method generateFromFeatureMatrix(autoencoder, 
autoencoderConnectomePath, chordSequence, featureQueueMatrixPath, featureSize, outputFilePath, songTitle); } break; case "population_trade": { //load parameter values from config file String autoencoderConnectomePath = config.getString("autoencoder_connectome"); String inputLeadsheetPath = config.getString("input_leadsheet"); String outputFolderPath = config.getString("output_folder"); int tradingPartSize = config.getInt("trading_part_size"); double interpVariance = config.getDouble("interpolation_variance"); double interpMin = config.getDouble("interpolation_min"); double herdingStrength = config.getDouble("herding_strength"); double mutationStrength = config.getDouble("mutation_strength"); double crossoverStrength = config.getDouble("crossover_strength"); //initialize network ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, true); //perform population trading test populationTradingTest(autoencoder, autoencoderConnectomePath, new File(inputLeadsheetPath), new File(outputFolderPath), tradingPartSize, interpVariance, interpMin, herdingStrength, mutationStrength, crossoverStrength); } break; case "interpolation": { //load parameter values from config file String autoencoderConnectomePath = config.getString("autoencoder_connectome"); String inputLeadsheetPath = config.getString("input_leadsheet"); String targetQueuePath = config.getString("target_queue"); String outputFolderPath = config.getString("output_folder"); int numInterpolationDivisions = config.getInt("num_interpolation_divisions"); //initialize network ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false); //perform the interpolation test interpolateTest(autoencoder, autoencoderConnectomePath, new File(inputLeadsheetPath), new File(targetQueuePath), new File(outputFolderPath), numInterpolationDivisions); } break; case "frankenstein": { //load parameter values from config file String autoencoderConnectomePath = 
config.getString("autoencoder_connectome"); String primaryQueuePath = config.getString("primary_queue_path"); String referenceLeadsheetPath = config.getString("reference_leadsheet"); String queueFolderPath = config.getString("queue_folder"); String outputFilePath = config.getString("output_file_path"); int numReferenceQueues = config.getInt("num_reference_queues"); int numCombinations = config.getInt("num_combinations"); double interpolationMagnitude = config.getDouble("interpolation_strength"); //initialize network ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false); //initialize chord sequence LeadsheetDataSequence chordSequence = leadsheetToSequence(referenceLeadsheetPath); chordSequence.clearMelody(); //perform frankenstein test frankensteinTest(autoencoder, autoencoderConnectomePath, primaryQueuePath, new File(queueFolderPath), outputFilePath, chordSequence, numReferenceQueues, numCombinations, interpolationMagnitude); } break; default: throw new RuntimeException("Unrecognized test type"); } LogTimer.log("Process finished"); //Done! }
From source file:featureExtractor.popularityMeasure.java
public static void main(String[] args) throws IOException { //ReadKnownPopularityScores(); FileWriter fw = new FileWriter(Out_resultFile); BufferedWriter bw = new BufferedWriter(fw); FileReader inputFile = new FileReader(In_entities); BufferedReader bufferReader = new BufferedReader(inputFile); String line;/*from w w w . j a va 2s . c o m*/ while ((line = bufferReader.readLine()) != null) { String[] row = line.split("\t"); double score = 0; String entityName = row[0].toLowerCase().trim(); System.out.println("Searching for : " + entityName); if (knownScore_table.containsKey(entityName)) { //System.out.println("Already known for: " + entityName); score = knownScore_table.get(entityName); } else { System.out.println("Not known for: " + entityName); String json = searchTest(entityName, "&scoring=entity"); try { score = ParseJSON_getScore(json); } catch (Exception e) { score = 0; } System.out.println("Putting : " + entityName); knownScore_table.put(entityName, score); } bw.write(row[0] + "\t" + score + "\n"); System.out.println(row[0]); } bw.close(); }
From source file:PCC.java
/** * @param args the command line arguments * @throws java.io.IOException/*from www. j a v a 2 s . co m*/ */ public static void main(String[] args) throws IOException { // TODO code application logic here PearsonsCorrelation corel = new PearsonsCorrelation(); PCC method = new PCC(); ArrayList<String> name = new ArrayList<>(); Multimap<String, String> genes = ArrayListMultimap.create(); BufferedWriter bw = new BufferedWriter(new FileWriter(args[1])); BufferedReader br = new BufferedReader(new FileReader(args[0])); String str; while ((str = br.readLine()) != null) { String[] a = str.split("\t"); name.add(a[0]); for (int i = 1; i < a.length; i++) { genes.put(a[0], a[i]); } } for (String key : genes.keySet()) { double[] first = new double[genes.get(key).size()]; int element1 = 0; for (String value : genes.get(key)) { double d = Double.parseDouble(value); first[element1] = d; element1++; } for (String key1 : genes.keySet()) { if (!key.equals(key1)) { double[] second = new double[genes.get(key1).size()]; int element2 = 0; for (String value : genes.get(key1)) { double d = Double.parseDouble(value); second[element2] = d; element2++; } double corrlation = corel.correlation(first, second); if (corrlation > 0.5) { bw.write(key + "\t" + key1 + "\t" + corrlation + "\t" + method.pvalue(corrlation, second.length) + "\n"); } } } } br.close(); bw.close(); }