List of usage examples for the java.io.BufferedWriter constructor
public BufferedWriter(Writer out)
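This constructor wraps an existing Writer and adds a default-sized output buffer, so many small write() calls are batched into fewer large ones. Before the longer examples below, a minimal sketch of the idiomatic pattern, using try-with-resources so the buffer is flushed and the stream closed even on error (the file name is a placeholder):

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;

public class BufferedWriterExample {
    public static void main(String[] args) throws IOException {
        // BufferedWriter(Writer out) decorates an existing Writer with a buffer.
        try (BufferedWriter writer = new BufferedWriter(new FileWriter("example.txt"))) {
            writer.write("Hello, BufferedWriter");
            writer.newLine(); // platform-specific line separator
        } // close() flushes the buffer automatically
    }
}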
From source file:com.galois.fiveui.HeadlessRunner.java
/**
 * @param args list of headless run description filenames
 * @throws IOException
 * @throws URISyntaxException
 * @throws ParseException
 */
@SuppressWarnings("static-access")
public static void main(final String[] args) throws IOException, URISyntaxException, ParseException {
    // Set up command line options
    Options options = new Options();
    Option help = new Option("h", "print this help message");
    Option output = OptionBuilder.withArgName("outfile").hasArg()
            .withDescription("write output to file").create("o");
    Option report = OptionBuilder.withArgName("report directory").hasArg()
            .withDescription("write HTML reports to given directory").create("r");
    options.addOption(output);
    options.addOption(report);
    options.addOption("v", false, "verbose output");
    options.addOption("vv", false, "VERY verbose output");
    options.addOption(help);

    // Parse command line options
    CommandLineParser parser = new GnuParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        System.err.println("Command line option parsing failed. Reason: " + e.getMessage());
        System.exit(1);
    }

    // Display help if requested
    if (cmd.hasOption("h")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("headless <input file 1> [<input file 2> ...]", options);
        System.exit(1);
    }

    // Set logging levels
    BasicConfigurator.configure();
    Logger fiveuiLogger = Logger.getLogger("com.galois.fiveui");
    Logger rootLogger = Logger.getRootLogger();
    if (cmd.hasOption("v")) {
        fiveuiLogger.setLevel(Level.DEBUG);
        rootLogger.setLevel(Level.ERROR);
    } else if (cmd.hasOption("vv")) {
        fiveuiLogger.setLevel(Level.DEBUG);
        rootLogger.setLevel(Level.DEBUG);
    } else {
        fiveuiLogger.setLevel(Level.ERROR);
        rootLogger.setLevel(Level.ERROR);
    }

    // Set up output file if requested
    PrintWriter outStream = null;
    if (cmd.hasOption("o")) {
        String outfile = cmd.getOptionValue("o");
        try {
            outStream = new PrintWriter(new BufferedWriter(new FileWriter(outfile)));
        } catch (IOException e) {
            System.err.println("Could not open outfile for writing: " + outfile);
            System.exit(1);
        }
    } else {
        outStream = new PrintWriter(new BufferedWriter(new PrintWriter(System.out)));
    }

    // Set up HTML reports directory before the major work happens,
    // in case we have to throw an exception.
    PrintWriter summaryFile = null;
    PrintWriter byURLFile = null;
    PrintWriter byRuleFile = null;
    if (cmd.hasOption("r")) {
        String repDir = cmd.getOptionValue("r");
        try {
            File file = new File(repDir);
            if (!file.exists()) {
                file.mkdir();
                logger.info("report directory created: " + repDir);
            } else {
                logger.info("report directory already exists!");
            }
            summaryFile = new PrintWriter(new FileWriter(repDir + File.separator + "summary.html"));
            byURLFile = new PrintWriter(new FileWriter(repDir + File.separator + "byURL.html"));
            byRuleFile = new PrintWriter(new FileWriter(repDir + File.separator + "byRule.html"));
        } catch (IOException e) {
            System.err.println("could not open report directory / files for writing");
            System.exit(1);
        }
    }

    // Major work: process input files
    ImmutableList<Result> results = null;
    for (String in : cmd.getArgs()) {
        HeadlessRunDescription descr = HeadlessRunDescription.parse(in);
        logger.debug("invoking headless run...");
        BatchRunner runner = new BatchRunner();
        results = runner.runHeadless(descr);
        logger.debug("runHeadless returned " + results.size() + " results");
        // write results to the output stream as we go
        for (Result result : results) {
            outStream.println(result.toString());
        }
        outStream.flush();
    }
    outStream.close();

    // Write report files if requested
    if (cmd.hasOption("r") && results != null) {
        Reporter kermit = new Reporter(results);
        summaryFile.write(kermit.getSummary());
        summaryFile.close();
        byURLFile.write(kermit.getByURL());
        byURLFile.close();
        byRuleFile.write(kermit.getByRule());
        byRuleFile.close();
    }
}
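The pattern above wraps a BufferedWriter in a PrintWriter so the caller gets println-style formatting with buffered I/O underneath, falling back to System.out when no output file is given. A minimal sketch of just that decorator chain (file name and message are placeholders):

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;

public class OutputTarget {
    public static void main(String[] args) throws IOException {
        // Write to a file when a path is supplied, otherwise to stdout;
        // either way the PrintWriter sits on top of a BufferedWriter.
        PrintWriter out = (args.length > 0)
                ? new PrintWriter(new BufferedWriter(new FileWriter(args[0])))
                : new PrintWriter(new BufferedWriter(new PrintWriter(System.out)));
        out.println("hello");   // buffered until flush/close
        out.flush();            // flush but keep System.out usable
        if (args.length > 0) {
            out.close();        // closing is safe for the file case
        }
    }
}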
From source file:apps.Source2XML.java
public static void main(String[] args) {
    Options options = new Options();
    options.addOption("i", null, true, "input file");
    options.addOption("o", null, true, "output file");
    options.addOption("reparse_xml", null, false, "reparse each XML entry to ensure the parser doesn't fail");

    Joiner commaJoin = Joiner.on(',');
    options.addOption("source_type", null, true,
            "document source type: " + commaJoin.join(SourceFactory.getDocSourceList()));

    Joiner spaceJoin = Joiner.on(' ');
    CommandLineParser parser = new org.apache.commons.cli.GnuParser();
    BufferedWriter outputFile = null;
    int docNum = 0;

    if (USE_LEMMATIZER && USE_STEMMER) {
        System.err.println("Bug/inconsistent code: can't use the stemmer and lemmatizer at the same time!");
        System.exit(1);
    }

    //Stemmer stemmer = new Stemmer();
    KrovetzStemmer stemmer = new KrovetzStemmer();
    System.out.println("Using Stanford NLP? " + USE_STANFORD);
    System.out.println("Using Stanford lemmatizer? " + USE_LEMMATIZER);
    System.out.println("Using stemmer? " + USE_STEMMER
            + (USE_STEMMER ? " (class: " + stemmer.getClass().getCanonicalName() + ")" : ""));

    try {
        CommandLine cmd = parser.parse(options, args);

        String inputFileName = null, outputFileName = null;
        if (cmd.hasOption("i")) {
            inputFileName = cmd.getOptionValue("i");
        } else {
            Usage("Specify 'input file'", options);
        }
        if (cmd.hasOption("o")) {
            outputFileName = cmd.getOptionValue("o");
        } else {
            Usage("Specify 'output file'", options);
        }

        outputFile = new BufferedWriter(
                new OutputStreamWriter(CompressUtils.createOutputStream(outputFileName)));

        String sourceName = cmd.getOptionValue("source_type");
        if (sourceName == null)
            Usage("Specify document source type", options);

        // Check the parsed command line, not the Options definition,
        // for whether the flag was actually passed.
        boolean reparseXML = cmd.hasOption("reparse_xml");

        DocumentSource inpDocSource = SourceFactory.createDocumentSource(sourceName, inputFileName);
        DocumentEntry inpDoc = null;
        TextCleaner textCleaner = new TextCleaner(
                new DictNoComments(new File("data/stopwords.txt"), true /* lower case */),
                USE_STANFORD, USE_LEMMATIZER);

        Map<String, String> outputMap = new HashMap<String, String>();
        outputMap.put(UtilConst.XML_FIELD_DOCNO, null);
        outputMap.put(UtilConst.XML_FIELD_TEXT, null);

        XmlHelper xmlHlp = new XmlHelper();

        if (reparseXML)
            System.out.println("Will reparse every XML entry to verify correctness!");

        while ((inpDoc = inpDocSource.next()) != null) {
            ++docNum;

            ArrayList<String> toks = textCleaner.cleanUp(inpDoc.mDocText);
            ArrayList<String> goodToks = new ArrayList<String>();
            for (String s : toks)
                if (s.length() <= MAX_WORD_LEN && // exclude long and short words
                        s.length() >= MIN_WORD_LEN && isGoodWord(s))
                    goodToks.add(USE_STEMMER ? stemmer.stem(s) : s);

            String partlyCleanedText = spaceJoin.join(goodToks);
            String cleanText = XmlHelper.removeInvaildXMLChars(partlyCleanedText);
            // isGoodWord combined with the Stanford tokenizer should be quite restrictive already
            //cleanText = replaceSomePunct(cleanText);

            outputMap.replace(UtilConst.XML_FIELD_DOCNO, inpDoc.mDocId);
            outputMap.replace(UtilConst.XML_FIELD_TEXT, cleanText);

            String xml = xmlHlp.genXMLIndexEntry(outputMap);

            if (reparseXML) {
                try {
                    XmlHelper.parseDocWithoutXMLDecl(xml);
                } catch (Exception e) {
                    System.err.println("Error re-parsing xml for document ID: " + inpDoc.mDocId);
                    System.exit(1);
                }
            }

            /*
            {
                System.out.println(inpDoc.mDocId);
                System.out.println("=====================");
                System.out.println(partlyCleanedText);
                System.out.println("=====================");
                System.out.println(cleanText);
            }
            */

            try {
                outputFile.write(xml);
                outputFile.write(NL);
            } catch (Exception e) {
                e.printStackTrace();
                System.err.println("Error processing/saving a document!");
            }

            if (docNum % 1000 == 0)
                System.out.println(String.format("Processed %d documents", docNum));
        }
    } catch (ParseException e) {
        e.printStackTrace();
        Usage("Cannot parse arguments" + e, options);
    } catch (Exception e) {
        System.err.println("Terminating due to an exception: " + e);
        System.exit(1);
    } finally {
        System.out.println(String.format("Processed %d documents", docNum));
        try {
            if (null != outputFile) {
                outputFile.close();
                System.out.println("Output file is closed! all seems to be fine...");
            }
        } catch (IOException e) {
            System.err.println("IO exception: " + e);
            e.printStackTrace();
        }
    }
}
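CompressUtils here is project-specific, but the layering it produces is the standard one: a byte stream, then an OutputStreamWriter to encode characters, then a BufferedWriter on top. A sketch of the same stack with plain JDK classes, using gzip and an explicit charset (the file name is a placeholder):

import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPOutputStream;

public class CompressedTextWriter {
    public static void main(String[] args) throws IOException {
        // Layering: file bytes <- gzip <- charset encoder <- buffer.
        // Passing the charset explicitly avoids the platform default.
        try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
                new GZIPOutputStream(new FileOutputStream("docs.xml.gz")),
                StandardCharsets.UTF_8))) {
            out.write("<doc>example</doc>");
            out.newLine();
        }
    }
}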
From source file:com.era7.bioinfo.annotation.AutomaticQualityControl.java
public static void main(String[] args) {

    if (args.length != 4) {
        System.out.println("This program expects four parameters: \n"
                + "1. Gene annotation XML filename \n"
                + "2. Reference protein set (.fasta)\n"
                + "3. Output TXT filename\n"
                + "4. Initial Blast XML results filename (the one used at the very beginning of the semiautomatic annotation process)\n");
    } else {
        BufferedWriter outBuff = null;
        try {
            File inFile = new File(args[0]);
            File fastaFile = new File(args[1]);
            File outFile = new File(args[2]);
            File blastFile = new File(args[3]);

            // First, load all the data from the blast XML file
            BufferedReader buffReader = new BufferedReader(new FileReader(blastFile));
            StringBuilder stBuilder = new StringBuilder();
            String line = null;
            while ((line = buffReader.readLine()) != null) {
                stBuilder.append(line);
            }
            buffReader.close();

            System.out.println("Creating blastoutput...");
            BlastOutput blastOutput = new BlastOutput(stBuilder.toString());
            System.out.println("BlastOutput created! :)");
            stBuilder.delete(0, stBuilder.length());

            HashMap<String, String> blastProteinsMap = new HashMap<String, String>();
            ArrayList<Iteration> iterations = blastOutput.getBlastOutputIterations();
            for (Iteration iteration : iterations) {
                blastProteinsMap.put(iteration.getQueryDef().split("\\|")[1].trim(), iteration.toString());
            }
            // freeing some memory
            blastOutput = null;

            //------------------------------------------------------------------------
            // Initializing writer for output file
            outBuff = new BufferedWriter(new FileWriter(outFile));

            // reading gene annotation xml file.....
            buffReader = new BufferedReader(new FileReader(inFile));
            stBuilder = new StringBuilder();
            line = null;
            while ((line = buffReader.readLine()) != null) {
                stBuilder.append(line);
            }
            buffReader.close();

            XMLElement genesXML = new XMLElement(stBuilder.toString());
            // freeing some memory I don't need anymore
            stBuilder.delete(0, stBuilder.length());

            // reading file with the reference proteins set
            ArrayList<String> proteinsReferenceSet = new ArrayList<String>();
            buffReader = new BufferedReader(new FileReader(fastaFile));
            while ((line = buffReader.readLine()) != null) {
                if (line.charAt(0) == '>') {
                    proteinsReferenceSet.add(line.split("\\|")[1]);
                }
            }
            buffReader.close();

            Element pGenes = genesXML.asJDomElement().getChild(PredictedGenes.TAG_NAME);
            List<Element> contigs = pGenes.getChildren(ContigXML.TAG_NAME);
            System.out.println("There are " + contigs.size() + " contigs to be checked... ");
            outBuff.write("There are " + contigs.size() + " contigs to be checked... \n");
            outBuff.write("Proteins reference set: \n");
            for (String st : proteinsReferenceSet) {
                outBuff.write(st + ",");
            }
            outBuff.write("\n");

            for (Element elem : contigs) {
                ContigXML contig = new ContigXML(elem);

                // write the id of the contig we are currently on
                outBuff.write("Checking contig: " + contig.getId() + "\n");
                outBuff.flush();

                List<XMLElement> geneList = contig.getChildrenWith(PredictedGene.TAG_NAME);
                System.out.println("geneList.size() = " + geneList.size());

                int numeroDeGenesParaAnalizar = geneList.size() / FACTOR;
                if (numeroDeGenesParaAnalizar == 0) {
                    numeroDeGenesParaAnalizar++;
                }

                ArrayList<Integer> indicesUtilizados = new ArrayList<Integer>();

                outBuff.write("\nThe contig has " + geneList.size() + " predicted genes, let's analyze: "
                        + numeroDeGenesParaAnalizar + "\n");

                for (int j = 0; j < numeroDeGenesParaAnalizar; j++) {

                    int geneIndex;
                    boolean geneIsDismissed = false;
                    do {
                        geneIsDismissed = false;
                        geneIndex = (int) Math.round(Math.floor(Math.random() * geneList.size()));
                        PredictedGene tempGene = new PredictedGene(geneList.get(geneIndex).asJDomElement());
                        if (tempGene.getStatus().equals(PredictedGene.STATUS_DISMISSED)) {
                            geneIsDismissed = true;
                        }
                        // re-draw while the index is already taken or the gene is dismissed
                    } while (indicesUtilizados.contains(geneIndex) || geneIsDismissed);

                    indicesUtilizados.add(geneIndex);
                    System.out.println("geneIndex = " + geneIndex);

                    // Now get the gene corresponding to the index and run the quality control
                    PredictedGene gene = new PredictedGene(geneList.get(geneIndex).asJDomElement());

                    outBuff.write("\nAnalyzing gene with id: " + gene.getId() + " , annotation uniprot id: "
                            + gene.getAnnotationUniprotId() + "\n");
                    outBuff.write("eValue: " + gene.getEvalue() + "\n");

                    //-------------- BLAST HTTP POST REQUEST ----------------------
                    PostMethod post = new PostMethod(BLAST_URL);
                    post.addParameter("program", "blastx");
                    post.addParameter("sequence", gene.getSequence());
                    post.addParameter("database", "uniprotkb");
                    post.addParameter("email", "ppareja@era7.com");
                    post.addParameter("exp", "1e-10");
                    post.addParameter("stype", "dna");

                    // execute the POST
                    HttpClient client = new HttpClient();
                    int status = client.executeMethod(post);
                    System.out.println("status post = " + status);

                    InputStream inStream = post.getResponseBodyAsStream();
                    String fileName = "jobid.txt";
                    FileOutputStream outStream = new FileOutputStream(new File(fileName));
                    byte[] buffer = new byte[1024];
                    int len;
                    while ((len = inStream.read(buffer)) != -1) {
                        outStream.write(buffer, 0, len);
                    }
                    outStream.close();

                    // Once the file is created I just have to read one line in order to extract the job id
                    buffReader = new BufferedReader(new FileReader(new File(fileName)));
                    String jobId = buffReader.readLine();
                    buffReader.close();
                    System.out.println("jobId = " + jobId);

                    //-------------- JOB STATUS HTTP CHECK ----------------------
                    GetMethod get = new GetMethod(CHECK_JOB_STATUS_URL + jobId);
                    String jobStatus = "";
                    do {
                        try {
                            Thread.sleep(1000); // sleep for 1000 ms
                        } catch (InterruptedException ie) {
                            // If this thread was interrupted by another thread
                        }

                        status = client.executeMethod(get);
                        //System.out.println("status get = " + status);

                        inStream = get.getResponseBodyAsStream();
                        fileName = "jobStatus.txt";
                        outStream = new FileOutputStream(new File(fileName));
                        while ((len = inStream.read(buffer)) != -1) {
                            outStream.write(buffer, 0, len);
                        }
                        outStream.close();

                        // Once the file is created I just have to read one line in order to extract the job status
                        buffReader = new BufferedReader(new FileReader(new File(fileName)));
                        jobStatus = buffReader.readLine();
                        //System.out.println("jobStatus = " + jobStatus);
                        buffReader.close();

                    } while (!jobStatus.equals(FINISHED_JOB_STATUS));
                    // Once I'm here the blast should've already finished

                    //-------------- JOB RESULTS HTTP REQUEST ----------------------
                    get = new GetMethod(JOB_RESULT_URL + jobId + "/out");
                    status = client.executeMethod(get);
                    System.out.println("status get = " + status);

                    inStream = get.getResponseBodyAsStream();
                    fileName = "jobResults.txt";
                    outStream = new FileOutputStream(new File(fileName));
                    while ((len = inStream.read(buffer)) != -1) {
                        outStream.write(buffer, 0, len);
                    }
                    outStream.close();

                    //-------- parsing the blast results file -----
                    TreeSet<GeneEValuePair> featuresBlast = new TreeSet<GeneEValuePair>();
                    buffReader = new BufferedReader(new FileReader(new File(fileName)));
                    while ((line = buffReader.readLine()) != null) {
                        if (line.length() > 3) {
                            String prefix = line.substring(0, 3);
                            if (prefix.equals("TR:") || prefix.equals("SP:")) {
                                String[] columns = line.split(" ");
                                String id = columns[1];
                                //System.out.println("id = " + id);

                                String e = "";
                                String[] arraySt = line.split("\\.\\.\\.");
                                if (arraySt.length > 1) {
                                    arraySt = arraySt[1].trim().split(" ");
                                    int contador = 0;
                                    for (int k = 0; k < arraySt.length && contador <= 2; k++) {
                                        String string = arraySt[k];
                                        if (!string.equals("")) {
                                            contador++;
                                            if (contador == 2) {
                                                e = string;
                                            }
                                        }
                                    }
                                } else {
                                    // Number before e-
                                    String[] arr = arraySt[0].split("e-")[0].split(" ");
                                    String numeroAntesE = arr[arr.length - 1];
                                    String numeroDespuesE = arraySt[0].split("e-")[1].split(" ")[0];
                                    e = numeroAntesE + "e-" + numeroDespuesE;
                                }
                                double eValue = Double.parseDouble(e);
                                //System.out.println("eValue = " + eValue);

                                GeneEValuePair g = new GeneEValuePair(id, eValue);
                                featuresBlast.add(g);
                            }
                        }
                    }

                    GeneEValuePair currentGeneEValuePair = new GeneEValuePair(gene.getAnnotationUniprotId(),
                            gene.getEvalue());
                    System.out.println("currentGeneEValuePair.id = " + currentGeneEValuePair.id);
                    System.out.println("currentGeneEValuePair.eValue = " + currentGeneEValuePair.eValue);

                    boolean blastContainsGene = false;
                    for (GeneEValuePair geneEValuePair : featuresBlast) {
                        if (geneEValuePair.id.equals(currentGeneEValuePair.id)) {
                            blastContainsGene = true;
                            // take the e-value it has in the wu-blast so the two can be compared
                            currentGeneEValuePair.eValue = geneEValuePair.eValue;
                            break;
                        }
                    }

                    if (blastContainsGene) {
                        outBuff.write("The protein was found in the WU-BLAST result.. \n");
                        // Once we know it is in the blast, check whether it is the best hit
                        GeneEValuePair first = featuresBlast.first();
                        outBuff.write("Protein with best eValue according to the WU-BLAST result: "
                                + first.id + " , " + first.eValue + "\n");
                        if (first.id.equals(currentGeneEValuePair.id)) {
                            outBuff.write("Proteins with best eValue match up \n");
                        } else {
                            if (first.eValue == currentGeneEValuePair.eValue) {
                                outBuff.write("The one with best eValue is not the same protein but has the same eValue \n");
                            } else if (first.eValue > currentGeneEValuePair.eValue) {
                                outBuff.write("The one with best eValue is not the same protein but has a worse eValue :) \n");
                            } else {
                                outBuff.write("The best protein from BLAST has an eValue smaller than ours, checking if it's part of the reference set...\n");
                                //System.exit(-1);
                                if (proteinsReferenceSet.contains(first.id)) {
                                    // The protein is in the reference set and that shouldn't happen
                                    outBuff.write("The protein was found on the reference set, checking if it belongs to the same contig...\n");
                                    String iterationSt = blastProteinsMap.get(gene.getAnnotationUniprotId());
                                    if (iterationSt != null) {
                                        outBuff.write("The protein was found in the BLAST used at the beginning of the annotation process.\n");
                                        Iteration iteration = new Iteration(iterationSt);
                                        ArrayList<Hit> hits = iteration.getIterationHits();
                                        boolean contigFound = false;
                                        Hit errorHit = null;
                                        for (Hit hit : hits) {
                                            if (hit.getHitDef().indexOf(contig.getId()) >= 0) {
                                                contigFound = true;
                                                errorHit = hit;
                                                break;
                                            }
                                        }
                                        if (contigFound) {
                                            outBuff.write("ERROR: A hit from the same contig was found in the Blast file: \n"
                                                    + errorHit.toString() + "\n");
                                        } else {
                                            outBuff.write("There is no hit with the same contig! :)\n");
                                        }
                                    } else {
                                        outBuff.write("The protein is NOT in the BLAST used at the beginning of the annotation process.\n");
                                    }
                                } else {
                                    // The protein was not found on the reference set so everything's ok
                                    outBuff.write("The protein was not found on the reference, everything's ok :)\n");
                                }
                            }
                        }
                    } else {
                        outBuff.write("The protein was NOT found on the WU-BLAST !! :( \n");
                        //System.exit(-1);
                    }
                }
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        } finally {
            try {
                // closing output file
                outBuff.close();
            } catch (IOException ex) {
                Logger.getLogger(AutomaticQualityControl.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
    }
}
From source file:PodbaseMetadataMigration2.java
public static void main(String[] args) throws Exception {
    System.out.println("Running data migration");

    String projectString = FileUtils.readFileToString(new File("projects.txt"));
    Map<String, Integer> projectIdMapping = new HashMap<String, Integer>();
    for (String line : projectString.split("\n")) {
        String[] split = line.split(":");
        int id = Integer.parseInt(split[0].trim());
        String name = split[1].trim();
        projectIdMapping.put(name, id);
    }

    System.out.println("Reading projects..");
    List<ProjectEntry> projects = dataFromFile("./migrate/projects.data", ProjectEntry.class);
    projectIdMap = parseProjectMap(projects, projectIdMapping);
    System.out.println("Found " + projects.size() + " projects.");

    System.out.println("Reading tags..");
    List<TagEntry> tags = dataFromFile("./migrate/tags.data", TagEntry.class);
    System.out.println("Found " + tags.size() + " tags.");

    System.out.println("Reading templates..");
    List<TemplateEntry> templates = dataFromFile("./migrate/templates.data", TemplateEntry.class);
    System.out.println("Found " + templates.size() + " templates.");

    System.out.println("Reading template fields..");
    List<TemplateFieldEntry> templateFields = dataFromFile("./migrate/template_fields.data",
            TemplateFieldEntry.class);
    System.out.println("Found " + templateFields.size() + " templateFields.");

    int entryCount = tags.size() + templates.size() + templateFields.size();

    //System.out.println("Generating Project SQL");
    //String projectSql = generateSql((List<AbstractEntry>) (List<?>) projects);

    System.out.println("Generating Attribute SQL");
    String imageAttributes = generateSql((List<AbstractEntry>) (List<?>) tags);

    System.out.println("Generating Image SQL");
    String databaseImages = generateDatabaseImageSql();

    //System.out.println("Generating Directory SQL");
    //String directorySql = generateDirectorySql(projects);

    //System.out.println("Generating Template SQL");
    //String templateSql = generateSql((List<AbstractEntry>) (List<?>) templates);

    //System.out.println("Generating Field SQL");
    //String fieldsSql = generateSql((List<AbstractEntry>) (List<?>) templateFields);

    System.out.println("Writing database.sql");
    BufferedWriter bw = new BufferedWriter(new FileWriter(new File("./database.sql")));
    //bw.append(projectSql);
    //bw.append("\n\n");
    bw.append(databaseImages);
    bw.append("\n\n");
    //bw.append(directorySql);
    //bw.append("\n\n");
    bw.append(imageAttributes);
    bw.append("\n\n");
    //bw.append(templateSql);
    //bw.append("\n\n");
    //bw.append(fieldsSql);
    //bw.append("\n\n");
    bw.close();

    System.out.println("Writing missingImages.txt");
    bw = new BufferedWriter(new FileWriter(new File("./missingImages.txt")));
    for (String img : missingImages) {
        bw.append(img + "\n");
    }
    bw.close();

    System.out.println("Migration completed successfully!");
}
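Since Java 7, the same buffered file writer can be obtained in one call through java.nio.file.Files, which also lets you pass the charset explicitly instead of relying on FileWriter's platform default. A sketch (the file name mirrors the example above):

import java.io.BufferedWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class SqlDump {
    public static void main(String[] args) throws IOException {
        // Files.newBufferedWriter is shorthand for
        // new BufferedWriter(new OutputStreamWriter(new FileOutputStream(...), charset))
        try (BufferedWriter bw = Files.newBufferedWriter(Paths.get("database.sql"),
                StandardCharsets.UTF_8)) {
            bw.append("-- generated SQL goes here");
            bw.newLine();
        }
    }
}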
From source file:com.linkedin.pinotdruidbenchmark.PinotResponseTime.java
public static void main(String[] args) throws Exception {
    if (args.length != 4 && args.length != 5) {
        System.err.println(
                "4 or 5 arguments required: QUERY_DIR, RESOURCE_URL, WARM_UP_ROUNDS, TEST_ROUNDS, RESULT_DIR (optional).");
        return;
    }

    File queryDir = new File(args[0]);
    String resourceUrl = args[1];
    int warmUpRounds = Integer.parseInt(args[2]);
    int testRounds = Integer.parseInt(args[3]);
    File resultDir;
    if (args.length == 4) {
        resultDir = null;
    } else {
        resultDir = new File(args[4]);
        if (!resultDir.exists()) {
            if (!resultDir.mkdirs()) {
                throw new RuntimeException("Failed to create result directory: " + resultDir);
            }
        }
    }

    File[] queryFiles = queryDir.listFiles();
    assert queryFiles != null;
    Arrays.sort(queryFiles);

    try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
        HttpPost httpPost = new HttpPost(resourceUrl);
        for (File queryFile : queryFiles) {
            String query = new BufferedReader(new FileReader(queryFile)).readLine();
            httpPost.setEntity(new StringEntity("{\"pql\":\"" + query + "\"}"));

            System.out.println("--------------------------------------------------------------------------------");
            System.out.println("Running query: " + query);
            System.out.println("--------------------------------------------------------------------------------");

            // Warm-up Rounds
            System.out.println("Run " + warmUpRounds + " times to warm up...");
            for (int i = 0; i < warmUpRounds; i++) {
                CloseableHttpResponse httpResponse = httpClient.execute(httpPost);
                httpResponse.close();
                System.out.print('*');
            }
            System.out.println();

            // Test Rounds
            System.out.println("Run " + testRounds + " times to get response time statistics...");
            long[] responseTimes = new long[testRounds];
            long totalResponseTime = 0L;
            for (int i = 0; i < testRounds; i++) {
                long startTime = System.currentTimeMillis();
                CloseableHttpResponse httpResponse = httpClient.execute(httpPost);
                httpResponse.close();
                long responseTime = System.currentTimeMillis() - startTime;
                responseTimes[i] = responseTime;
                totalResponseTime += responseTime;
                System.out.print(responseTime + "ms ");
            }
            System.out.println();

            // Store result.
            if (resultDir != null) {
                File resultFile = new File(resultDir, queryFile.getName() + ".result");
                CloseableHttpResponse httpResponse = httpClient.execute(httpPost);
                try (BufferedInputStream bufferedInputStream = new BufferedInputStream(
                        httpResponse.getEntity().getContent());
                        BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(resultFile))) {
                    int length;
                    while ((length = bufferedInputStream.read(BYTE_BUFFER)) > 0) {
                        bufferedWriter.write(new String(BYTE_BUFFER, 0, length));
                    }
                }
                httpResponse.close();
            }

            // Process response times.
            double averageResponseTime = (double) totalResponseTime / testRounds;
            double temp = 0;
            for (long responseTime : responseTimes) {
                temp += (responseTime - averageResponseTime) * (responseTime - averageResponseTime);
            }
            double standardDeviation = Math.sqrt(temp / testRounds);
            System.out.println("Average response time: " + averageResponseTime + "ms");
            System.out.println("Standard deviation: " + standardDeviation);
        }
    }
}
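One caveat in the result-storing loop above: new String(BYTE_BUFFER, 0, length) decodes with the platform default charset and can corrupt a multibyte character that happens to straddle two reads. A sketch of an alternative (method and file names are illustrative) that decodes through an InputStreamReader, so the charset decoder handles split sequences:

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public class ResponseBodyCopy {
    // Copies an InputStream to a file through a Reader, so multibyte
    // sequences spanning buffer boundaries are decoded correctly.
    static void copyBody(InputStream body, String resultFile) throws IOException {
        char[] buffer = new char[8192];
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(body, StandardCharsets.UTF_8));
                BufferedWriter writer = new BufferedWriter(new FileWriter(resultFile))) {
            int length;
            while ((length = reader.read(buffer)) > 0) {
                writer.write(buffer, 0, length);
            }
        }
    }
}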
From source file:com.nextdoor.bender.S3SnsNotifier.java
public static void main(String[] args) throws ParseException, InterruptedException, IOException {
    formatter = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").withZoneUTC();

    /*
     * Parse cli arguments
     */
    Options options = new Options();
    options.addOption(Option.builder().longOpt("bucket").hasArg().required()
            .desc("Name of S3 bucket to list s3 objects from").build());
    options.addOption(Option.builder().longOpt("key-file").hasArg().required()
            .desc("Local file of S3 keys to process").build());
    options.addOption(
            Option.builder().longOpt("sns-arn").hasArg().required().desc("SNS arn to publish to").build());
    options.addOption(Option.builder().longOpt("throttle-ms").hasArg()
            .desc("Amount of ms to wait between publishing to SNS").build());
    options.addOption(Option.builder().longOpt("processed-file").hasArg()
            .desc("Local file to use to store processed S3 object names").build());
    options.addOption(Option.builder().longOpt("skip-processed").hasArg(false)
            .desc("Whether to skip S3 objects that have been processed").build());
    options.addOption(
            Option.builder().longOpt("dry-run").hasArg(false).desc("If set do not publish to SNS").build());

    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = parser.parse(options, args);

    String bucket = cmd.getOptionValue("bucket");
    String keyFile = cmd.getOptionValue("key-file");
    String snsArn = cmd.getOptionValue("sns-arn");
    String processedFile = cmd.getOptionValue("processed-file", null);
    boolean skipProcessed = cmd.hasOption("skip-processed");
    dryRun = cmd.hasOption("dry-run");
    long throttle = Long.parseLong(cmd.getOptionValue("throttle-ms", "-1"));

    if (processedFile != null) {
        File file = new File(processedFile);
        if (!file.exists()) {
            logger.debug("creating local file to store processed s3 object names: " + processedFile);
            file.createNewFile();
        }
    }

    /*
     * Import S3 keys that have been processed
     */
    if (skipProcessed && processedFile != null) {
        try (BufferedReader br = new BufferedReader(new FileReader(processedFile))) {
            String line;
            while ((line = br.readLine()) != null) {
                alreadyPublished.add(line.trim());
            }
        }
    }

    /*
     * Setup writer for file containing processed S3 keys
     */
    FileWriter fw = null;
    BufferedWriter bw = null;
    if (processedFile != null) {
        fw = new FileWriter(processedFile, true);
        bw = new BufferedWriter(fw);
    }

    /*
     * Create clients
     */
    AmazonS3Client s3Client = new AmazonS3Client();
    AmazonSNSClient snsClient = new AmazonSNSClient();

    /*
     * Get S3 object list
     */
    try (BufferedReader br = new BufferedReader(new FileReader(keyFile))) {
        String line;
        while ((line = br.readLine()) != null) {
            String key = line.trim();

            if (alreadyPublished.contains(key)) {
                logger.info("skipping " + key);
                continue;
            }

            ObjectMetadata om = s3Client.getObjectMetadata(bucket, key);
            S3EventNotification s3Notification = getS3Notification(key, bucket, om.getContentLength());
            String json = s3Notification.toJson();

            /*
             * Publish to SNS
             */
            if (publish(snsArn, json, snsClient, key) && processedFile != null) {
                bw.write(key + "\n");
                bw.flush();
            }

            if (throttle != -1) {
                Thread.sleep(throttle);
            }
        }
    }

    if (processedFile != null) {
        bw.close();
        fw.close();
    }
}
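The processed-file handling above is a checkpoint pattern: a BufferedWriter over an append-mode FileWriter, flushed after every record so progress survives a crash or restart. A minimal sketch of just that pattern (file name and record are placeholders):

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;

public class CheckpointLog {
    public static void main(String[] args) throws IOException {
        // true -> append mode, so restarts keep the existing history
        try (BufferedWriter bw = new BufferedWriter(new FileWriter("processed.txt", true))) {
            bw.write("some/object/key");
            bw.newLine();
            bw.flush(); // push the record to disk now, not at close()
        }
    }
}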
From source file:it.sayservice.platform.smartplanner.utils.LegGenerator.java
public static void main(String[] args) throws IOException {

    Mongo m = new Mongo("localhost"); // default port 27017
    DB db = m.getDB("smart-planner-15x");
    DBCollection coll = db.getCollection("stops");

    // read trips.txt (trips, serviceId).
    List<String[]> trips = readFileGetLines("src/main/resources/schedules/17/trips.txt");
    List<String[]> stopTimes = readFileGetLines("src/main/resources/schedules/17/stop_times.txt");

    for (String[] words : trips) {
        try {
            String routeId = words[0].trim();
            String serviceId = words[1].trim();
            String tripId = words[2].trim();

            // fetch schedule for trips.
            for (int i = 0; i < stopTimes.size(); i++) {
                // already ordered by occurrence.
                String[] scheduleLeg = stopTimes.get(i);
                if (scheduleLeg[0].equalsIgnoreCase(tripId)) {
                    // check if next leg belongs to same trip
                    if (stopTimes.get(i + 1)[0].equalsIgnoreCase(tripId)) {
                        String arrivalT = scheduleLeg[1];
                        String departT = scheduleLeg[2];
                        String sourceId = scheduleLeg[3];
                        String destId = stopTimes.get(i + 1)[3];

                        // get coordinates of stops.
                        /**
                         * make sure that the mongo stop collection is populated.
                         * if not, invoke
                         * http://localhost:7070/smart-planner/rest/getTransitTimes/TB_R2_R/1366776000000/1366819200000
                         */
                        Stop source = (Stop) getObjectByField(db, "id", sourceId, coll, Stop.class);
                        Stop destination = (Stop) getObjectByField(db, "id", destId, coll, Stop.class);

                        // System.out.println(tripId + "," + routeId + ","
                        //         + source.getId() + "," + source.getLatitude() + "," + source.getLongitude() + "," + arrivalT + ","
                        //         + destination.getId() + "," + destination.getLatitude() + "," + destination.getLongitude() + ","
                        //         + departT + "," + serviceId);

                        String content = tripId + "," + routeId + "," + source.getStopId() + ","
                                + source.getLatitude() + "," + source.getLongitude() + "," + arrivalT + ","
                                + destination.getStopId() + "," + destination.getLatitude() + ","
                                + destination.getLongitude() + "," + departT + "," + "Giornaliero" + "\n";

                        // single leg file
                        File file = new File("src/main/resources/legs/legs.txt");
                        if (!file.exists()) {
                            file.createNewFile();
                        }
                        FileWriter fw = new FileWriter(file.getAbsoluteFile(), true);
                        BufferedWriter bw = new BufferedWriter(fw);
                        bw.write(content);
                        bw.close();

                        // individual trip leg file.
                        File fileT = new File("src/main/resources/legs/legs_" + routeId + ".txt");
                        FileWriter fwT = new FileWriter(fileT.getAbsoluteFile(), true);
                        BufferedWriter bwT = new BufferedWriter(fwT);
                        bwT.write(content);
                        bwT.close();
                    }
                }
            }
        } catch (Exception e) {
            System.out.println("Error parsing trip: " + words[0] + "," + words[1] + "," + words[2]);
        }
    }
    System.out.println("Done");
}
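Opening and closing a BufferedWriter for every leg forces a flush per record and re-opens the same files thousands of times. A sketch of the usual alternative, holding one append-mode writer per route and closing them all at the end (class and file naming mirror the example but are illustrative):

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class LegWriters {
    private final Map<String, BufferedWriter> writers = new HashMap<>();

    // Reuse one append-mode writer per route instead of reopening the file per record.
    BufferedWriter forRoute(String routeId) throws IOException {
        BufferedWriter bw = writers.get(routeId);
        if (bw == null) {
            bw = new BufferedWriter(new FileWriter("legs_" + routeId + ".txt", true));
            writers.put(routeId, bw);
        }
        return bw;
    }

    void closeAll() throws IOException {
        for (BufferedWriter bw : writers.values()) {
            bw.close(); // flushes each buffer
        }
    }
}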
From source file:com.ibm.watson.catalyst.corpus.tfidf.CorpusTfidf.java
public static void main(String[] args) {
    PROPERTIES = BaseProperties.setInstance(args, "sample/test.properties");
    String input = PROPERTIES.getProperty("input", "sample/test-check.json");

    TermCorpusBuilder cb = new TermCorpusBuilder();
    cb.setJson(input);

    System.out.println("Building corpus.");
    TermCorpus c = cb.build();
    System.out.println(c.size());

    System.out.println("Generating terms.");
    c.genTerms();
    System.out.println("Generating idfs.");
    c.genIdfs();
    System.out.println(c.numTerms());
    System.out.println("Terms generated.");

    ObjectNode tfidfs = getCorpusTfidfs(c);
    String output = PROPERTIES.getProperty("output", "sample/test-tfidf-output.json");
    try (BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(new FileOutputStream(output), "UTF-8"))) {
        bw.write(tfidfs.toString());
    } catch (IOException e) {
        e.printStackTrace();
    }
}
From source file:FileCompressor.java
public static void main(String[] args) throws IOException {
    String file = "D:\\XJad.rar.txt";
    BufferedReader reader = new BufferedReader(new FileReader(file));
    BufferedWriter writer = new BufferedWriter(new FileWriter(file + "_out.txt"));
    StringBuilder content = new StringBuilder();
    String tmp;
    while ((tmp = reader.readLine()) != null) {
        content.append(tmp);
        content.append(System.getProperty("line.separator"));
    }

    FileCompressor f = new FileCompressor();
    writer.write(f.compress(content.toString()));
    writer.close();
    reader.close();

    reader = new BufferedReader(new FileReader(file + "_out.txt"));
    StringBuilder content2 = new StringBuilder();
    while ((tmp = reader.readLine()) != null) {
        content2.append(tmp);
        content2.append(System.getProperty("line.separator"));
    }
    String decompressed = f.decompress(content2.toString());
    String c = content.toString();
    System.out.println(decompressed.equals(c));
}
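Reading with readLine() and re-appending line.separator, as above, normalizes line endings and adds a separator after the last line even if the original file lacked one; the round-trip check still passes because both sides are normalized the same way, but the output file is not byte-for-byte identical to the input. A sketch of a verbatim character copy (paths are placeholders) for cases where exact preservation matters:

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;

public class ExactCopy {
    public static void main(String[] args) throws IOException {
        char[] buf = new char[8192];
        // Copy characters verbatim: no line-ending normalization,
        // no extra separator appended after the last line.
        try (BufferedReader in = new BufferedReader(new FileReader("in.txt"));
                BufferedWriter out = new BufferedWriter(new FileWriter("out.txt"))) {
            int n;
            while ((n = in.read(buf)) != -1) {
                out.write(buf, 0, n);
            }
        }
    }
}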
From source file:com.linkedin.pinotdruidbenchmark.DruidResponseTime.java
public static void main(String[] args) throws Exception {
    if (args.length != 4 && args.length != 5) {
        System.err.println(
                "4 or 5 arguments required: QUERY_DIR, RESOURCE_URL, WARM_UP_ROUNDS, TEST_ROUNDS, RESULT_DIR (optional).");
        return;
    }

    File queryDir = new File(args[0]);
    String resourceUrl = args[1];
    int warmUpRounds = Integer.parseInt(args[2]);
    int testRounds = Integer.parseInt(args[3]);
    File resultDir;
    if (args.length == 4) {
        resultDir = null;
    } else {
        resultDir = new File(args[4]);
        if (!resultDir.exists()) {
            if (!resultDir.mkdirs()) {
                throw new RuntimeException("Failed to create result directory: " + resultDir);
            }
        }
    }

    File[] queryFiles = queryDir.listFiles();
    assert queryFiles != null;
    Arrays.sort(queryFiles);

    try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
        HttpPost httpPost = new HttpPost(resourceUrl);
        httpPost.addHeader("content-type", "application/json");
        for (File queryFile : queryFiles) {
            StringBuilder stringBuilder = new StringBuilder();
            try (BufferedReader bufferedReader = new BufferedReader(new FileReader(queryFile))) {
                int length;
                while ((length = bufferedReader.read(CHAR_BUFFER)) > 0) {
                    stringBuilder.append(new String(CHAR_BUFFER, 0, length));
                }
            }
            String query = stringBuilder.toString();
            httpPost.setEntity(new StringEntity(query));

            System.out.println("--------------------------------------------------------------------------------");
            System.out.println("Running query: " + query);
            System.out.println("--------------------------------------------------------------------------------");

            // Warm-up Rounds
            System.out.println("Run " + warmUpRounds + " times to warm up...");
            for (int i = 0; i < warmUpRounds; i++) {
                CloseableHttpResponse httpResponse = httpClient.execute(httpPost);
                httpResponse.close();
                System.out.print('*');
            }
            System.out.println();

            // Test Rounds
            System.out.println("Run " + testRounds + " times to get response time statistics...");
            long[] responseTimes = new long[testRounds];
            long totalResponseTime = 0L;
            for (int i = 0; i < testRounds; i++) {
                long startTime = System.currentTimeMillis();
                CloseableHttpResponse httpResponse = httpClient.execute(httpPost);
                httpResponse.close();
                long responseTime = System.currentTimeMillis() - startTime;
                responseTimes[i] = responseTime;
                totalResponseTime += responseTime;
                System.out.print(responseTime + "ms ");
            }
            System.out.println();

            // Store result.
            if (resultDir != null) {
                File resultFile = new File(resultDir, queryFile.getName() + ".result");
                CloseableHttpResponse httpResponse = httpClient.execute(httpPost);
                try (BufferedInputStream bufferedInputStream = new BufferedInputStream(
                        httpResponse.getEntity().getContent());
                        BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(resultFile))) {
                    int length;
                    while ((length = bufferedInputStream.read(BYTE_BUFFER)) > 0) {
                        bufferedWriter.write(new String(BYTE_BUFFER, 0, length));
                    }
                }
                httpResponse.close();
            }

            // Process response times.
            double averageResponseTime = (double) totalResponseTime / testRounds;
            double temp = 0;
            for (long responseTime : responseTimes) {
                temp += (responseTime - averageResponseTime) * (responseTime - averageResponseTime);
            }
            double standardDeviation = Math.sqrt(temp / testRounds);
            System.out.println("Average response time: " + averageResponseTime + "ms");
            System.out.println("Standard deviation: " + standardDeviation);
        }
    }
}