List of usage examples for java.io.BufferedReader.close()
public void close() throws IOException
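The examples below come from real projects and mostly close the reader by hand. As a point of reference, here is a minimal standalone sketch (not taken from any of the projects below; the file name input.txt is a placeholder) of the two usual ways to guarantee close() runs: try-with-resources, which closes the reader automatically, and an explicit close() in a finally block, the pre-Java-7 idiom most of the examples use.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class BufferedReaderCloseExample {
    public static void main(String[] args) throws IOException {
        // Preferred since Java 7: try-with-resources calls close() automatically,
        // even if readLine() throws.
        try (BufferedReader reader = new BufferedReader(new FileReader("input.txt"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        } // reader.close() is invoked implicitly here

        // Pre-Java-7 pattern: close in a finally block so the underlying
        // file handle is released on both success and failure.
        BufferedReader br = null;
        try {
            br = new BufferedReader(new FileReader("input.txt"));
            String line;
            while ((line = br.readLine()) != null) {
                System.out.println(line);
            }
        } finally {
            if (br != null) {
                br.close();
            }
        }
    }
}

Either way, close() releases the underlying stream; calling readLine() on a closed reader throws an IOException.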
From source file:io.bfscan.clueweb12.BuildWarcTrecIdMapping.java
@SuppressWarnings("static-access")
public static void main(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(OptionBuilder.withArgName("path").hasArg()
            .withDescription("bz2 Wikipedia XML dump file").create(INPUT_OPTION));
    options.addOption(OptionBuilder.withArgName("dir").hasArg()
            .withDescription("index location").create(INDEX_OPTION));
    options.addOption(OptionBuilder.withArgName("num").hasArg()
            .withDescription("maximum number of documents to index").create(MAX_OPTION));
    options.addOption(OptionBuilder.withArgName("num").hasArg()
            .withDescription("number of indexing threads").create(THREADS_OPTION));
    options.addOption(new Option(OPTIMIZE_OPTION, "merge indexes into a single segment"));

    CommandLine cmdline = null;
    CommandLineParser parser = new GnuParser();
    try {
        cmdline = parser.parse(options, args);
    } catch (ParseException exp) {
        System.err.println("Error parsing command line: " + exp.getMessage());
        System.exit(-1);
    }

    if (!cmdline.hasOption(INPUT_OPTION) || !cmdline.hasOption(INDEX_OPTION)) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp(BuildWarcTrecIdMapping.class.getCanonicalName(), options);
        System.exit(-1);
    }

    String indexPath = cmdline.getOptionValue(INDEX_OPTION);
    int maxdocs = cmdline.hasOption(MAX_OPTION) ? Integer.parseInt(cmdline.getOptionValue(MAX_OPTION))
            : Integer.MAX_VALUE;
    int threads = cmdline.hasOption(THREADS_OPTION) ? Integer.parseInt(cmdline.getOptionValue(THREADS_OPTION))
            : DEFAULT_NUM_THREADS;

    long startTime = System.currentTimeMillis();

    String path = cmdline.getOptionValue(INPUT_OPTION);
    PrintStream out = new PrintStream(System.out, true, "UTF-8");

    Directory dir = FSDirectory.open(new File(indexPath));
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43, ANALYZER);
    config.setOpenMode(OpenMode.CREATE);

    IndexWriter writer = new IndexWriter(dir, config);
    LOG.info("Creating index at " + indexPath);
    LOG.info("Indexing with " + threads + " threads");

    FileInputStream fis = null;
    BufferedReader br = null;
    try {
        fis = new FileInputStream(new File(path));
        byte[] ignoreBytes = new byte[2];
        fis.read(ignoreBytes); // "B", "Z" bytes from command-line tools
        br = new BufferedReader(new InputStreamReader(new CBZip2InputStream(fis), "UTF8"));

        ExecutorService executor = Executors.newFixedThreadPool(threads);
        int cnt = 0;
        String s;
        while ((s = br.readLine()) != null) {
            Runnable worker = new AddDocumentRunnable(writer, s);
            executor.execute(worker);

            cnt++;
            if (cnt % 1000000 == 0) {
                LOG.info(cnt + " articles added");
            }
            if (cnt >= maxdocs) {
                break;
            }
        }

        executor.shutdown();
        // Wait until all threads have finished
        while (!executor.isTerminated()) {
        }

        LOG.info("Total of " + cnt + " articles indexed.");

        if (cmdline.hasOption(OPTIMIZE_OPTION)) {
            LOG.info("Merging segments...");
            writer.forceMerge(1);
            LOG.info("Done!");
        }

        LOG.info("Total elapsed time: " + (System.currentTimeMillis() - startTime) + "ms");
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        writer.close();
        dir.close();
        out.close();
        br.close();
        fis.close();
    }
}
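A note on the finally block above: closing the outermost BufferedReader is enough to release the whole chain, because Reader.close() propagates to the wrapped streams. A minimal sketch of the same kind of chain (file name hypothetical):

// Closing the BufferedReader also closes the wrapped InputStreamReader
// and FileInputStream, so a single close() releases the file handle.
BufferedReader br = new BufferedReader(
        new InputStreamReader(new FileInputStream("dump.bz2"), "UTF8"));
try {
    System.out.println(br.readLine());
} finally {
    br.close();
}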
From source file:CourserankConnector.java
public static void main(String[] args) throws Exception {
    ///////////////////////////////////////
    // Tagger init
    // MaxentTagger tagger = new MaxentTagger("models/english-left3words-distsim.tagger");

    ///////////////////////////////////////
    // CLIENT INITIALIZATION
    ImportData importCourse = new ImportData();
    HttpClient httpclient = new DefaultHttpClient();
    httpclient = WebClientDevWrapper.wrapClient(httpclient);
    try {
        /*
        httpclient.getCredentialsProvider().setCredentials(new AuthScope(null, -1),
                new UsernamePasswordCredentials("eadrian", "eactresp1"));
        */

        //////////////////////////////////////////////////
        // Get Course Bulletin Departments page
        List<Course> courses = new ArrayList<Course>();
        HttpGet httpget = new HttpGet("http://explorecourses.stanford.edu");
        System.out.println("executing request " + httpget.getRequestLine());
        HttpResponse response = httpclient.execute(httpget);
        HttpEntity entity = response.getEntity();
        System.out.println("----------------------------------------");
        System.out.println(response.getStatusLine());

        // Store returned HTML in bulletinpage
        String bulletinpage = "";
        if (entity != null) {
            // System.out.println("Response content length: " + entity.getContentLength());
            InputStream i = entity.getContent();
            BufferedReader br = new BufferedReader(new InputStreamReader(i));
            String line;
            while ((line = br.readLine()) != null) {
                bulletinpage += line;
                // System.out.println(line);
            }
            br.close();
            i.close();
        }
        EntityUtils.consume(entity);

        ///////////////////////////////////////////////////////////////////////////////
        // Login to Courserank
        httpget = new HttpGet("https://courserank.com/stanford/main");
        System.out.println("executing request " + httpget.getRequestLine());
        response = httpclient.execute(httpget);
        entity = response.getEntity();
        System.out.println("----------------------------------------");
        System.out.println(response.getStatusLine());

        String page = "";
        if (entity != null) {
            System.out.println("Response content length: " + entity.getContentLength());
            InputStream i = entity.getContent();
            BufferedReader br = new BufferedReader(new InputStreamReader(i));
            String line;
            while ((line = br.readLine()) != null) {
                page += line;
                // System.out.println(line);
            }
            br.close();
            i.close();
        }
        EntityUtils.consume(entity);

        ////////////////////////////////////////////////////
        // POST REQUEST LOGIN
        HttpPost post = new HttpPost("https://www.courserank.com/stanford/main");
        List<NameValuePair> pairs = new ArrayList<NameValuePair>(2);
        pairs.add(new BasicNameValuePair("RT", ""));
        pairs.add(new BasicNameValuePair("action", "login"));
        pairs.add(new BasicNameValuePair("password", "trespass"));
        pairs.add(new BasicNameValuePair("username", "eaconte@stanford.edu"));
        post.setEntity(new UrlEncodedFormEntity(pairs));
        System.out.println("executing request " + post.getRequestLine());
        HttpResponse resp = httpclient.execute(post);
        HttpEntity ent = resp.getEntity();
        System.out.println("----------------------------------------");
        if (ent != null) {
            System.out.println("Response content length: " + ent.getContentLength());
            InputStream i = ent.getContent();
            BufferedReader br = new BufferedReader(new InputStreamReader(i));
            String line;
            while ((line = br.readLine()) != null) {
                // System.out.println(line);
            }
            br.close();
            i.close();
        }
        EntityUtils.consume(ent);

        ///////////////////////////////////////////////////
        // THIS STEP MAY NOT BE NEEDED BUT GETS MAIN PROFILE PAGE
        HttpGet gethome = new HttpGet("https://www.courserank.com/stanford/home");
        System.out.println("executing request " + gethome.getRequestLine());
        HttpResponse gresp = httpclient.execute(gethome);
        HttpEntity gent = gresp.getEntity();
        System.out.println("----------------------------------------");
        if (gent != null) {
            System.out.println("Response content length: " + gent.getContentLength());
            InputStream i = gent.getContent();
            BufferedReader br = new BufferedReader(new InputStreamReader(i));
            String line;
            while ((line = br.readLine()) != null) {
                // System.out.println(line);
            }
            br.close();
            i.close();
        }

        /////////////////////////////////////////////////////////////////////////////////
        // Parse Bulletin
        String results = getToken(bulletinpage, "RESULTS HEADER", "Additional Searches");

        // Split on each department link, iterate
        String[] depts = results.split("href");
        boolean ready = false;
        for (int i = 1; i < depts.length; i++) {
            // Extract link, department name and abbreviation
            String dept = new String(depts[i]);
            String abbr = getToken(dept, "(", ")");
            String name = getToken(dept, ">", "(");
            name = name.trim();
            // System.out.println(tagger.tagString(name));
            String link = getToken(dept, "=\"", "\">");
            System.out.println(name + " : " + abbr + " : " + link);
            System.out.println("======================================================================");

            // Values to keep it to undergraduate courses. Excludes law, med, business, overseas
            if (i <= 10 || i >= 127)
                continue;
            /* if (i <= 46) continue; */ // Start at BIOHOP
            /*
            if (abbr.equals("INTNLREL"))
                ready = true;
            if (!ready)
                continue;
            */

            // Construct department course search URL, then request the page
            String URL = "http://explorecourses.stanford.edu/" + link
                    + "&filter-term-Autumn=on&filter-term-Winter=on&filter-term-Spring=on";
            httpget = new HttpGet(URL);
            // System.out.println("executing request " + httpget.getRequestLine());
            response = httpclient.execute(httpget);
            entity = response.getEntity();
            // System.out.println("----------------------------------------");
            // System.out.println(response.getStatusLine());

            String rpage = "";
            if (entity != null) {
                // System.out.println("Response content length: " + entity.getContentLength());
                InputStream in = entity.getContent();
                BufferedReader br = new BufferedReader(new InputStreamReader(in));
                String line;
                while ((line = br.readLine()) != null) {
                    rpage += line;
                    // System.out.println(line);
                }
                br.close();
                in.close();
            }
            EntityUtils.consume(entity);

            // Process results page
            List<Course> deptCourses = new ArrayList<Course>();
            List<Course> result = processResultPage(rpage);
            deptCourses.addAll(result);

            // While there are more result pages, keep going
            boolean more = (!(result.size() == 0) && (result.get(result.size() - 1).courseNumber < 299));
            boolean morepages = anotherPage(rpage);
            while (morepages && more) {
                URL = nextURL(URL);
                httpget = new HttpGet(URL);
                // System.out.println("executing request " + httpget.getRequestLine());
                response = httpclient.execute(httpget);
                entity = response.getEntity();
                // System.out.println("----------------------------------------");
                // System.out.println(response.getStatusLine());
                rpage = "";
                if (entity != null) {
                    // System.out.println("Response content length: " + entity.getContentLength());
                    InputStream in = entity.getContent();
                    BufferedReader br = new BufferedReader(new InputStreamReader(in));
                    String line;
                    while ((line = br.readLine()) != null) {
                        rpage += line;
                        // System.out.println(line);
                    }
                    br.close();
                    in.close();
                }
                EntityUtils.consume(entity);
                morepages = anotherPage(rpage);
                result = processResultPage(rpage);
                deptCourses.addAll(result);
                more = (!(result.size() == 0) && (result.get(result.size() - 1).courseNumber < 299));
                /*
                String mores = more ? "yes" : "no";
                String pagess = morepages ? "yes" : "no";
                System.out.println("more: " + mores + " morepages: " + pagess);
                System.out.println("more");
                */
            }

            // Get course ratings for all department courses via Courserank
            deptCourses = getRatings(httpclient, abbr, deptCourses);
            for (int j = 0; j < deptCourses.size(); j++) {
                Course c = deptCourses.get(j);
                System.out.println("" + c.title + " : " + c.rating);
                c.tags = name;
                c.code = c.code.trim();
                c.department = name;
                c.deptAB = abbr;
                c.writeToDatabase();
                // System.out.println(tagger.tagString(c.title));
            }
        }

        if (!page.equals(""))
            return;

        ///////////////////////////////////////////////////
        // Get Course Bulletin Department courses
        /*
        httpget = new HttpGet("https://courserank.com/stanford/main");
        System.out.println("executing request " + httpget.getRequestLine());
        response = httpclient.execute(httpget);
        entity = response.getEntity();
        System.out.println("----------------------------------------");
        System.out.println(response.getStatusLine());
        page = "";
        if (entity != null) {
            System.out.println("Response content length: " + entity.getContentLength());
            InputStream i = entity.getContent();
            BufferedReader br = new BufferedReader(new InputStreamReader(i));
            String line;
            while ((line = br.readLine()) != null) {
                page += line;
                // System.out.println(line);
            }
            br.close();
            i.close();
        }
        EntityUtils.consume(entity);

        ////////////////////////////////////////////////////
        // POST REQUEST LOGIN
        HttpPost post = new HttpPost("https://www.courserank.com/stanford/main");
        List<NameValuePair> pairs = new ArrayList<NameValuePair>(2);
        pairs.add(new BasicNameValuePair("RT", ""));
        pairs.add(new BasicNameValuePair("action", "login"));
        pairs.add(new BasicNameValuePair("password", "trespass"));
        pairs.add(new BasicNameValuePair("username", "eaconte@stanford.edu"));
        post.setEntity(new UrlEncodedFormEntity(pairs));
        System.out.println("executing request " + post.getRequestLine());
        HttpResponse resp = httpclient.execute(post);
        HttpEntity ent = resp.getEntity();
        System.out.println("----------------------------------------");
        if (ent != null) {
            System.out.println("Response content length: " + ent.getContentLength());
            InputStream i = ent.getContent();
            BufferedReader br = new BufferedReader(new InputStreamReader(i));
            String line;
            while ((line = br.readLine()) != null) {
                // System.out.println(line);
            }
            br.close();
            i.close();
        }
        EntityUtils.consume(ent);

        ///////////////////////////////////////////////////
        // THIS STEP MAY NOT BE NEEDED BUT GETS MAIN PROFILE PAGE
        HttpGet gethome = new HttpGet("https://www.courserank.com/stanford/home");
        System.out.println("executing request " + gethome.getRequestLine());
        HttpResponse gresp = httpclient.execute(gethome);
        HttpEntity gent = gresp.getEntity();
        System.out.println("----------------------------------------");
        if (ent != null) {
            System.out.println("Response content length: " + gent.getContentLength());
            InputStream i = gent.getContent();
            BufferedReader br = new BufferedReader(new InputStreamReader(i));
            String line;
            while ((line = br.readLine()) != null) {
                // System.out.println(line);
            }
            br.close();
            i.close();
        }

        ////////////////////////////////////////
        // GETS FIRST PAGE OF RESULTS
        EntityUtils.consume(gent);
        post = new HttpPost("https://www.courserank.com/stanford/search_results");
        pairs = new ArrayList<NameValuePair>(2);
        pairs.add(new BasicNameValuePair("filter_term_currentYear", "on"));
        pairs.add(new BasicNameValuePair("query", ""));
        post.setEntity(new UrlEncodedFormEntity(pairs));
        System.out.println("executing request " + post.getRequestLine());
        resp = httpclient.execute(post);
        ent = resp.getEntity();
        System.out.println("----------------------------------------");
        String rpage = "";
        if (ent != null) {
            System.out.println("Response content length: " + ent.getContentLength());
            InputStream i = ent.getContent();
            BufferedReader br = new BufferedReader(new InputStreamReader(i));
            String line;
            while ((line = br.readLine()) != null) {
                rpage += line;
                System.out.println(line);
            }
            br.close();
            i.close();
        }
        EntityUtils.consume(ent);

        ////////////////////////////////////////////////////
        // PARSE FIRST PAGE OF RESULTS
        // int index = rpage.indexOf("div class=\"searchItem");
        String[] classSplit = rpage.split("div class=\"searchItem");
        for (int i = 1; i < classSplit.length; i++) {
            String str = classSplit[i];
            // ID
            String CID = getToken(str, "course?id=", "\">");
            // CODE
            String CODE = getToken(str, "class=\"code\">", ":</");
            // TITLE
            String NAME = getToken(str, "class=\"title\">", "</");
            // DESCRIP
            String DES = getToken(str, "class=\"description\">", "</");
            // TERM
            String TERM = getToken(str, "Terms:", "|");
            // UNITS
            String UNITS = getToken(str, "Units:", "<br/>");
            // WORKLOAD
            String WLOAD = getToken(str, "Workload:", "|");
            // GER
            String GER = getToken(str, "GERs:", "</d");
            // RATING
            int searchIndex = 0;
            float rating = 0;
            while (true) {
                int ratingIndex = str.indexOf("large_Full", searchIndex);
                if (ratingIndex == -1) {
                    int halfratingIndex = str.indexOf("large_Half", searchIndex);
                    if (halfratingIndex == -1)
                        break;
                    else
                        rating += .5;
                    break;
                }
                searchIndex = ratingIndex + 1;
                rating++;
            }
            String RATING = "" + rating;
            // GRADE
            String GRADE = getToken(str, "div class=\"unofficialGrade\">", "</");
            if (GRADE.equals("NOT FOUND")) {
                GRADE = getToken(str, "div class=\"officialGrade\">", "</");
            }
            // REVIEWS
            String REVIEWS = getToken(str, "class=\"ratings\">", " ratings");
            System.out.println("" + CODE + " : " + NAME + " : " + CID);
            System.out.println("----------------------------------------");
            System.out.println("Term: " + TERM + " Units: " + UNITS + " Workload: " + WLOAD + " Grade: " + GRADE);
            System.out.println("Rating: " + RATING + " Reviews: " + REVIEWS);
            System.out.println("==========================================");
            System.out.println(DES);
            System.out.println("==========================================");
        }

        ///////////////////////////////////////////////////
        // GETS SECOND PAGE OF RESULTS
        post = new HttpPost("https://www.courserank.com/stanford/search_results");
        pairs = new ArrayList<NameValuePair>(2);
        pairs.add(new BasicNameValuePair("filter_term_currentYear", "on"));
        pairs.add(new BasicNameValuePair("page", "2"));
        pairs.add(new BasicNameValuePair("query", ""));
        post.setEntity(new UrlEncodedFormEntity(pairs));
        System.out.println("executing request " + post.getRequestLine());
        resp = httpclient.execute(post);
        ent = resp.getEntity();
        System.out.println("----------------------------------------");
        if (ent != null) {
            System.out.println("Response content length: " + ent.getContentLength());
            InputStream i = ent.getContent();
            BufferedReader br = new BufferedReader(new InputStreamReader(i));
            String line;
            while ((line = br.readLine()) != null) {
                // System.out.println(line);
            }
            br.close();
            i.close();
        }
        EntityUtils.consume(ent);

        httpget = new HttpGet("https://github.com/");
        System.out.println("executing request " + httpget.getRequestLine());
        response = httpclient.execute(httpget);
        entity = response.getEntity();
        System.out.println("----------------------------------------");
        System.out.println(response.getStatusLine());
        page = "";
        if (entity != null) {
            System.out.println("Response content length: " + entity.getContentLength());
            InputStream i = entity.getContent();
            BufferedReader br = new BufferedReader(new InputStreamReader(i));
            String line;
            while ((line = br.readLine()) != null) {
                page += line;
                System.out.println(line);
            }
            br.close();
            i.close();
        }
        */
        EntityUtils.consume(entity);
    } finally {
        // When the HttpClient instance is no longer needed,
        // shut down the connection manager to ensure
        // immediate deallocation of all system resources
        httpclient.getConnectionManager().shutdown();
    }
}
From source file:jmxbf.java
public static void main(String[] args) throws IOException, MalformedObjectNameException {
    String HOST = "";
    String PORT = "";
    String usersFile = "";
    String pwdFile = "";

    CommandLine cmd = getParsedCommandLine(args);
    if (cmd != null) {
        HOST = cmd.getOptionValue("host");
        PORT = cmd.getOptionValue("port");
        usersFile = cmd.getOptionValue("usernames-file");
        pwdFile = cmd.getOptionValue("passwords-file");
    } else {
        System.exit(1);
    }

    String finalResults = "";
    BufferedReader users = new BufferedReader(new FileReader(usersFile));
    BufferedReader pwds = new BufferedReader(new FileReader(pwdFile));

    JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://" + HOST + ":" + PORT + "/jmxrmi");
    // new JMXServiceURL("service:jmx:remoting-jmx://" + HOST + ":" + PORT);

    String user = null;
    boolean found = false;
    while ((user = users.readLine()) != null) {
        String pwd = null;
        while ((pwd = pwds.readLine()) != null) {
            // System.out.println(user + ":" + pwd);
            Map<String, String[]> env = new HashMap<>();
            String[] credentials = { user, pwd };
            env.put(JMXConnector.CREDENTIALS, credentials);
            try {
                JMXConnector jmxConnector = JMXConnectorFactory.connect(url, env);
                System.out.println();
                System.out.println();
                System.out.println();
                System.out.println(
                        "[+] ###SUCCESS### - We got a valid connection for: " + user + ":" + pwd + "\r\n\r\n");
                finalResults = finalResults + "\n" + user + ":" + pwd;
                jmxConnector.close();
                found = true;
                break;
            } catch (java.lang.SecurityException e) {
                System.out.println("Auth failed!!!\r\n");
            }
        }
        if (found) {
            System.out.println("Found some valid credentials - continuing brute force");
            found = false;
        }
        // Closing and reopening pwds
        pwds.close();
        pwds = new BufferedReader(new FileReader(pwdFile));
    }
    users.close();

    // Print final results
    if (finalResults.length() != 0) {
        System.out.println("The following valid credentials were found:\n");
        System.out.println(finalResults);
    }
}
From source file:edu.nyu.vida.data_polygamy.feature_identification.IndexCreation.java
/**
 * @param args
 */
@SuppressWarnings({ "deprecation" })
public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
    Options options = new Options();

    Option forceOption = new Option("f", "force", false,
            "force the computation of the index and events even if files already exist");
    forceOption.setRequired(false);
    options.addOption(forceOption);

    Option thresholdOption = new Option("t", "use-custom-thresholds", false,
            "use custom thresholds for regular and rare events, defined in HDFS_HOME/"
                    + FrameworkUtils.thresholdDir + " file");
    thresholdOption.setRequired(false);
    options.addOption(thresholdOption);

    Option gOption = new Option("g", "group", true,
            "set group of datasets for which the indices and events will be computed");
    gOption.setRequired(true);
    gOption.setArgName("GROUP");
    gOption.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(gOption);

    Option machineOption = new Option("m", "machine", true, "machine identifier");
    machineOption.setRequired(true);
    machineOption.setArgName("MACHINE");
    machineOption.setArgs(1);
    options.addOption(machineOption);

    Option nodesOption = new Option("n", "nodes", true, "number of nodes");
    nodesOption.setRequired(true);
    nodesOption.setArgName("NODES");
    nodesOption.setArgs(1);
    options.addOption(nodesOption);

    Option s3Option = new Option("s3", "s3", false, "data on Amazon S3");
    s3Option.setRequired(false);
    options.addOption(s3Option);

    Option awsAccessKeyIdOption = new Option("aws_id", "aws-id", true,
            "aws access key id; this is required if the execution is on aws");
    awsAccessKeyIdOption.setRequired(false);
    awsAccessKeyIdOption.setArgName("AWS-ACCESS-KEY-ID");
    awsAccessKeyIdOption.setArgs(1);
    options.addOption(awsAccessKeyIdOption);

    Option awsSecretAccessKeyOption = new Option("aws_key", "aws-id", true,
            "aws secret access key; this is required if the execution is on aws");
    awsSecretAccessKeyOption.setRequired(false);
    awsSecretAccessKeyOption.setArgName("AWS-SECRET-ACCESS-KEY");
    awsSecretAccessKeyOption.setArgs(1);
    options.addOption(awsSecretAccessKeyOption);

    Option bucketOption = new Option("b", "s3-bucket", true,
            "bucket on s3; this is required if the execution is on aws");
    bucketOption.setRequired(false);
    bucketOption.setArgName("S3-BUCKET");
    bucketOption.setArgs(1);
    options.addOption(bucketOption);

    Option helpOption = new Option("h", "help", false, "display this message");
    helpOption.setRequired(false);
    options.addOption(helpOption);

    HelpFormatter formatter = new HelpFormatter();
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        formatter.printHelp(
                "hadoop jar data-polygamy.jar edu.nyu.vida.data_polygamy.feature_identification.IndexCreation",
                options, true);
        System.exit(0);
    }

    if (cmd.hasOption("h")) {
        formatter.printHelp(
                "hadoop jar data-polygamy.jar edu.nyu.vida.data_polygamy.feature_identification.IndexCreation",
                options, true);
        System.exit(0);
    }

    boolean s3 = cmd.hasOption("s3");
    String s3bucket = "";
    String awsAccessKeyId = "";
    String awsSecretAccessKey = "";

    if (s3) {
        if ((!cmd.hasOption("aws_id")) || (!cmd.hasOption("aws_key")) || (!cmd.hasOption("b"))) {
            System.out.println("Arguments 'aws_id', 'aws_key', and 'b' are mandatory if execution is on AWS.");
            formatter.printHelp(
                    "hadoop jar data-polygamy.jar edu.nyu.vida.data_polygamy.feature_identification.IndexCreation",
                    options, true);
            System.exit(0);
        }
        s3bucket = cmd.getOptionValue("b");
        awsAccessKeyId = cmd.getOptionValue("aws_id");
        awsSecretAccessKey = cmd.getOptionValue("aws_key");
    }

    boolean snappyCompression = false;
    boolean bzip2Compression = false;
    String machine = cmd.getOptionValue("m");
    int nbNodes = Integer.parseInt(cmd.getOptionValue("n"));

    Configuration s3conf = new Configuration();
    if (s3) {
        s3conf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        s3conf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        s3conf.set("bucket", s3bucket);
    }

    String datasetNames = "";
    String datasetIds = "";
    ArrayList<String> shortDataset = new ArrayList<String>();
    ArrayList<String> shortDatasetIndex = new ArrayList<String>();
    HashMap<String, String> datasetAgg = new HashMap<String, String>();
    HashMap<String, String> datasetId = new HashMap<String, String>();
    HashMap<String, HashMap<Integer, Double>> datasetRegThreshold = new HashMap<String, HashMap<Integer, Double>>();
    HashMap<String, HashMap<Integer, Double>> datasetRareThreshold = new HashMap<String, HashMap<Integer, Double>>();

    Path path = null;
    FileSystem fs = FileSystem.get(new Configuration());
    BufferedReader br;

    boolean removeExistingFiles = cmd.hasOption("f");
    boolean isThresholdUserDefined = cmd.hasOption("t");

    for (String dataset : cmd.getOptionValues("g")) {
        // Getting aggregates
        String[] aggregate = FrameworkUtils.searchAggregates(dataset, s3conf, s3);
        if (aggregate.length == 0) {
            System.out.println("No aggregates found for " + dataset + ".");
            continue;
        }

        // Getting aggregates header
        String aggregatesHeaderFileName = FrameworkUtils.searchAggregatesHeader(dataset, s3conf, s3);
        if (aggregatesHeaderFileName == null) {
            System.out.println("No aggregate header for " + dataset);
            continue;
        }
        String aggregatesHeader = s3bucket + FrameworkUtils.preProcessingDir + "/" + aggregatesHeaderFileName;

        shortDataset.add(dataset);
        datasetId.put(dataset, null);

        if (s3) {
            path = new Path(aggregatesHeader);
            fs = FileSystem.get(path.toUri(), s3conf);
        } else {
            path = new Path(fs.getHomeDirectory() + "/" + aggregatesHeader);
        }

        br = new BufferedReader(new InputStreamReader(fs.open(path)));
        datasetAgg.put(dataset, br.readLine().split("\t")[1]);
        br.close();
        if (s3)
            fs.close();
    }

    if (shortDataset.size() == 0) {
        System.out.println("No datasets to process.");
        System.exit(0);
    }

    // Getting dataset id
    if (s3) {
        path = new Path(s3bucket + FrameworkUtils.datasetsIndexDir);
        fs = FileSystem.get(path.toUri(), s3conf);
    } else {
        path = new Path(fs.getHomeDirectory() + "/" + FrameworkUtils.datasetsIndexDir);
    }
    br = new BufferedReader(new InputStreamReader(fs.open(path)));
    String line = br.readLine();
    while (line != null) {
        String[] dt = line.split("\t");
        if (datasetId.containsKey(dt[0])) {
            datasetId.put(dt[0], dt[1]);
            datasetNames += dt[0] + ",";
            datasetIds += dt[1] + ",";
        }
        line = br.readLine();
    }
    br.close();

    datasetNames = datasetNames.substring(0, datasetNames.length() - 1);
    datasetIds = datasetIds.substring(0, datasetIds.length() - 1);

    Iterator<String> it = shortDataset.iterator();
    while (it.hasNext()) {
        String dataset = it.next();
        if (datasetId.get(dataset) == null) {
            System.out.println("No dataset id for " + dataset);
            System.exit(0);
        }
    }

    // Getting user-defined thresholds
    if (isThresholdUserDefined) {
        if (s3) {
            path = new Path(s3bucket + FrameworkUtils.thresholdDir);
            fs = FileSystem.get(path.toUri(), s3conf);
        } else {
            path = new Path(fs.getHomeDirectory() + "/" + FrameworkUtils.thresholdDir);
        }
        br = new BufferedReader(new InputStreamReader(fs.open(path)));
        line = br.readLine();
        while (line != null) {
            // Getting dataset name
            String dataset = line.trim();
            HashMap<Integer, Double> regThresholds = new HashMap<Integer, Double>();
            HashMap<Integer, Double> rareThresholds = new HashMap<Integer, Double>();
            line = br.readLine();
            while ((line != null) && (line.split("\t").length > 1)) {
                // Getting attribute ids and thresholds
                String[] keyVals = line.trim().split("\t");
                int att = Integer.parseInt(keyVals[0].trim());
                regThresholds.put(att, Double.parseDouble(keyVals[1].trim()));
                rareThresholds.put(att, Double.parseDouble(keyVals[2].trim()));
                line = br.readLine();
            }
            datasetRegThreshold.put(dataset, regThresholds);
            datasetRareThreshold.put(dataset, rareThresholds);
        }
        br.close();
    }
    if (s3)
        fs.close();

    // Datasets that will use an existing merge tree
    ArrayList<String> useMergeTree = new ArrayList<String>();

    // Creating index for each spatio-temporal resolution
    FrameworkUtils.createDir(s3bucket + FrameworkUtils.indexDir, s3conf, s3);

    HashSet<String> input = new HashSet<String>();

    for (String dataset : shortDataset) {
        String indexCreationOutputFileName = s3bucket + FrameworkUtils.indexDir + "/" + dataset + "/";
        String mergeTreeFileName = s3bucket + FrameworkUtils.mergeTreeDir + "/" + dataset + "/";

        if (removeExistingFiles) {
            FrameworkUtils.removeFile(indexCreationOutputFileName, s3conf, s3);
            FrameworkUtils.removeFile(mergeTreeFileName, s3conf, s3);
            FrameworkUtils.createDir(mergeTreeFileName, s3conf, s3);
        } else if (datasetRegThreshold.containsKey(dataset)) {
            FrameworkUtils.removeFile(indexCreationOutputFileName, s3conf, s3);
            if (FrameworkUtils.fileExists(mergeTreeFileName, s3conf, s3)) {
                useMergeTree.add(dataset);
            }
        }

        if (!FrameworkUtils.fileExists(indexCreationOutputFileName, s3conf, s3)) {
            input.add(s3bucket + FrameworkUtils.aggregatesDir + "/" + dataset);
            shortDatasetIndex.add(dataset);
        }
    }

    if (input.isEmpty()) {
        System.out.println("All the input datasets have indices.");
        System.out.println("Use -f in the beginning of the command line to force the computation.");
        System.exit(0);
    }

    String aggregateDatasets = "";
    it = input.iterator();
    while (it.hasNext()) {
        aggregateDatasets += it.next() + ",";
    }

    Job icJob = null;
    Configuration icConf = new Configuration();
    Machine machineConf = new Machine(machine, nbNodes);

    String jobName = "index";
    String indexOutputDir = s3bucket + FrameworkUtils.indexDir + "/tmp/";

    FrameworkUtils.removeFile(indexOutputDir, s3conf, s3);

    icConf.set("dataset-name", datasetNames);
    icConf.set("dataset-id", datasetIds);

    if (!useMergeTree.isEmpty()) {
        String useMergeTreeStr = "";
        for (String dt : useMergeTree) {
            useMergeTreeStr += dt + ",";
        }
        icConf.set("use-merge-tree", useMergeTreeStr.substring(0, useMergeTreeStr.length() - 1));
    }

    for (int i = 0; i < shortDataset.size(); i++) {
        String dataset = shortDataset.get(i);
        String id = datasetId.get(dataset);
        icConf.set("dataset-" + id + "-aggregates", datasetAgg.get(dataset));
        if (datasetRegThreshold.containsKey(dataset)) {
            HashMap<Integer, Double> regThresholds = datasetRegThreshold.get(dataset);
            String thresholds = "";
            for (int att : regThresholds.keySet()) {
                thresholds += String.valueOf(att) + "-" + String.valueOf(regThresholds.get(att)) + ",";
            }
            icConf.set("regular-" + id, thresholds.substring(0, thresholds.length() - 1));
        }
        if (datasetRareThreshold.containsKey(dataset)) {
            HashMap<Integer, Double> rareThresholds = datasetRareThreshold.get(dataset);
            String thresholds = "";
            for (int att : rareThresholds.keySet()) {
                thresholds += String.valueOf(att) + "-" + String.valueOf(rareThresholds.get(att)) + ",";
            }
            icConf.set("rare-" + id, thresholds.substring(0, thresholds.length() - 1));
        }
    }

    icConf.set("mapreduce.tasktracker.map.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    icConf.set("mapreduce.tasktracker.reduce.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    icConf.set("mapreduce.jobtracker.maxtasks.perjob", "-1");
    icConf.set("mapreduce.reduce.shuffle.parallelcopies", "20");
    icConf.set("mapreduce.input.fileinputformat.split.minsize", "0");
    icConf.set("mapreduce.task.io.sort.mb", "200");
    icConf.set("mapreduce.task.io.sort.factor", "100");
    // icConf.set("mapreduce.task.timeout", "1800000");

    machineConf.setMachineConfiguration(icConf);

    if (s3) {
        machineConf.setMachineConfiguration(icConf);
        icConf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        icConf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        icConf.set("bucket", s3bucket);
    }

    if (snappyCompression) {
        icConf.set("mapreduce.map.output.compress", "true");
        icConf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
        // icConf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
    }
    if (bzip2Compression) {
        icConf.set("mapreduce.map.output.compress", "true");
        icConf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
        // icConf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
    }

    icJob = new Job(icConf);
    icJob.setJobName(jobName);

    icJob.setMapOutputKeyClass(AttributeResolutionWritable.class);
    icJob.setMapOutputValueClass(SpatioTemporalFloatWritable.class);
    icJob.setOutputKeyClass(AttributeResolutionWritable.class);
    icJob.setOutputValueClass(TopologyTimeSeriesWritable.class);
    // icJob.setOutputKeyClass(Text.class);
    // icJob.setOutputValueClass(Text.class);

    icJob.setMapperClass(IndexCreationMapper.class);
    icJob.setReducerClass(IndexCreationReducer.class);
    icJob.setNumReduceTasks(machineConf.getNumberReduces());

    icJob.setInputFormatClass(SequenceFileInputFormat.class);
    // icJob.setOutputFormatClass(SequenceFileOutputFormat.class);
    LazyOutputFormat.setOutputFormatClass(icJob, SequenceFileOutputFormat.class);
    // LazyOutputFormat.setOutputFormatClass(icJob, TextOutputFormat.class);
    SequenceFileOutputFormat.setCompressOutput(icJob, true);
    SequenceFileOutputFormat.setOutputCompressionType(icJob, CompressionType.BLOCK);

    FileInputFormat.setInputDirRecursive(icJob, true);
    FileInputFormat.setInputPaths(icJob, aggregateDatasets.substring(0, aggregateDatasets.length() - 1));
    FileOutputFormat.setOutputPath(icJob, new Path(indexOutputDir));

    icJob.setJarByClass(IndexCreation.class);

    long start = System.currentTimeMillis();
    icJob.submit();
    icJob.waitForCompletion(true);
    System.out.println(jobName + "\t" + (System.currentTimeMillis() - start));

    // Moving files to the right place
    for (String dataset : shortDatasetIndex) {
        String from = s3bucket + FrameworkUtils.indexDir + "/tmp/" + dataset + "/";
        String to = s3bucket + FrameworkUtils.indexDir + "/" + dataset + "/";
        FrameworkUtils.renameFile(from, to, s3conf, s3);
    }
}
From source file:com.wakacommerce.common.util.PomEvaluator.java
/**
 * @param args
 */
public static void main(String[] args) {
    initializeKnownLibraries();
    BufferedReader br = null;
    try {
        String fileName = "/Users/brianpolster/blc-workspace/BroadleafCommerce/pom.xml";
        if (args.length > 0) {
            fileName = args[0];
        }

        br = new BufferedReader(new FileReader(fileName));
        forwardToTag("<dependencies>", br);

        List<Dependency> dependencies = populateDependencies(br);

        for (Dependency dependency : dependencies) {
            Category category = knownLibraries.get(dependency.groupId);
            if (category != null) {
                category.dependencyList.add(dependency);
                List<Dependency> licenseDependencyList = licenseDependencyMap.get(category.licenseType);
                if (licenseDependencyList == null) {
                    licenseDependencyList = new ArrayList<Dependency>();
                    licenseDependencyList.add(dependency);
                    licenseDependencyMap.put(category.licenseType, licenseDependencyList);
                }
            } else {
                if (dependency.scope != null
                        && (dependency.scope.equals("test") || dependency.scope.equals("provided"))) {
                    continue;
                }
                OTHER.dependencyList.add(dependency);
            }
        }

        Set<Category> categoryList = new HashSet<Category>(knownLibraries.values());

        System.out.println("Related Software Report\r");

        for (Category category : categoryList) {
            printOutDependencies(category, category.dependencyList);
        }

        if (OTHER.dependencyList.size() > 0) {
            printOutDependencies(OTHER, OTHER.dependencyList);
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        try {
            if (br != null)
                br.close();
        } catch (IOException ex) {
            ex.printStackTrace();
        }
    }
}
From source file:at.spardat.xma.xdelta.JarPatcher.java
/**
 * Main method to make {@link #applyDelta(ZipFile, ZipFile, ZipArchiveOutputStream, BufferedReader)}
 * available at the command line.<br>
 * usage: JarPatcher patch [output [source]]
 *
 * @param args the arguments
 * @throws IOException Signals that an I/O exception has occurred.
 */
public static void main(String[] args) throws IOException {
    String patchName = null;
    String outputName = null;
    String sourceName = null;
    if (args.length == 0) {
        System.err.println("usage JarPatcher patch [output [source]]");
        System.exit(1);
    } else {
        patchName = args[0];
        if (args.length > 1) {
            outputName = args[1];
            if (args.length > 2) {
                sourceName = args[2];
            }
        }
    }
    ZipFile patch = new ZipFile(patchName);
    ZipArchiveEntry listEntry = patch.getEntry("META-INF/file.list");
    if (listEntry == null) {
        System.err.println("Invalid patch - list entry 'META-INF/file.list' not found");
        System.exit(2);
    }
    BufferedReader list = new BufferedReader(new InputStreamReader(patch.getInputStream(listEntry)));
    String next = list.readLine();
    if (sourceName == null) {
        sourceName = next;
    }
    next = list.readLine();
    if (outputName == null) {
        outputName = next;
    }
    int ignoreSourcePaths = Integer.parseInt(System.getProperty("patcher.ignoreSourcePathElements", "0"));
    int ignoreOutputPaths = Integer.parseInt(System.getProperty("patcher.ignoreOutputPathElements", "0"));
    Path sourcePath = Paths.get(sourceName);
    Path outputPath = Paths.get(outputName);
    if (ignoreOutputPaths >= outputPath.getNameCount()) {
        patch.close();
        StringBuilder b = new StringBuilder().append("Not enough path elements to ignore in output (")
                .append(ignoreOutputPaths).append(" in ").append(outputName).append(")");
        throw new IOException(b.toString());
    }
    if (ignoreSourcePaths >= sourcePath.getNameCount()) {
        patch.close();
        StringBuilder b = new StringBuilder().append("Not enough path elements to ignore in source (")
                .append(ignoreSourcePaths).append(" in ").append(sourceName).append(")");
        throw new IOException(b.toString());
    }
    sourcePath = sourcePath.subpath(ignoreSourcePaths, sourcePath.getNameCount());
    outputPath = outputPath.subpath(ignoreOutputPaths, outputPath.getNameCount());
    File sourceFile = sourcePath.toFile();
    File outputFile = outputPath.toFile();
    if (!(outputFile.getAbsoluteFile().getParentFile().mkdirs()
            || outputFile.getAbsoluteFile().getParentFile().exists())) {
        patch.close();
        throw new IOException("Failed to create " + outputFile.getAbsolutePath());
    }
    new JarPatcher(patchName, sourceFile.getName()).applyDelta(patch, new ZipFile(sourceFile),
            new ZipArchiveOutputStream(new FileOutputStream(outputFile)), list);
    list.close();
}
From source file:com.medicaid.mmis.util.DataLoader.java
/**
 * The main function, imports the files given as arguments.
 *
 * @param args the file names
 * @throws IOException for read/write errors
 * @throws PortalServiceException for any other errors
 */
public static void main(String[] args) throws IOException, PortalServiceException {
    if (args.length != 2) {
        System.out.println("2 file path arguments are required.");
        return;
    }
    PropertyConfigurator.configure("log4j.properties");
    logger = Logger.getLogger(DataLoader.class);

    LookupServiceBean lookupBean = new LookupServiceBean();
    EntityManagerFactory emf = Persistence.createEntityManagerFactory("cms-data-load");
    EntityManager em = emf.createEntityManager();
    lookupBean.setEm(em);
    DataLoader loader = new DataLoader();
    loader.setLookup(lookupBean);
    SequenceGeneratorBean sequence = new SequenceGeneratorBean();
    sequence.setEm(em);
    ProviderEnrollmentServiceBean enrollmentBean = new ProviderEnrollmentServiceBean();
    enrollmentBean.setEm(em);
    enrollmentBean.setSequence(sequence);
    enrollmentBean.setLookupService(lookupBean);
    loader.setEnrollmentService(enrollmentBean);

    long processId = sequence.getNextValue("PROCESS_ID");
    System.out.println("Started process id " + processId);

    BufferedReader br = null;
    PrintWriter accepted = null;
    PrintWriter rejected = null;
    try {
        System.out.println("Processing file 1...");
        File success = new File("accepted_1_" + processId + ".txt");
        File failure = new File("rejected_1_" + processId + ".txt");
        success.createNewFile();
        failure.createNewFile();
        accepted = new PrintWriter(success);
        rejected = new PrintWriter(failure);
        br = new BufferedReader(new FileReader(args[0]));
        String line = null;
        int total = 0;
        int errors = 0;
        while ((line = br.readLine()) != null) {
            total++;
            try {
                em.getTransaction().begin();
                loader.readProviderFile(new ByteArrayInputStream(line.getBytes()));
                em.getTransaction().commit();
                accepted.println(line);
                logger.info("Commit row " + total);
            } catch (PortalServiceException e) {
                rejected.println(line);
                em.getTransaction().rollback();
                errors++;
                logger.error("Rollback row " + total + " :" + e.getMessage());
            }
        }
        accepted.flush();
        accepted.close();
        rejected.flush();
        rejected.close();
        br.close();
        System.out.println("Total records read: " + total);
        System.out.println("Total rejected: " + errors);

        System.out.println("Processing file 2...");
        success = new File("accepted_2_" + processId + ".txt");
        failure = new File("rejected_2_" + processId + ".txt");
        success.createNewFile();
        failure.createNewFile();
        accepted = new PrintWriter(success);
        rejected = new PrintWriter(failure);
        br = new BufferedReader(new FileReader(args[1]));
        line = null;
        total = 0;
        errors = 0;
        while ((line = br.readLine()) != null) {
            total++;
            try {
                em.getTransaction().begin();
                Map<String, OwnershipInformation> owners = loader
                        .readWS000EXT2OWNBEN(new ByteArrayInputStream(line.getBytes()));
                for (Map.Entry<String, OwnershipInformation> entry : owners.entrySet()) {
                    enrollmentBean.addBeneficialOwners(entry.getKey(), entry.getValue());
                }
                em.getTransaction().commit();
                accepted.println(line);
                logger.info("Commit row " + total);
            } catch (PortalServiceException e) {
                rejected.println(line);
                em.getTransaction().rollback();
                errors++;
                logger.error("Rollback row " + total + " :" + e.getMessage());
            }
        }
        accepted.flush();
        rejected.flush();
        System.out.println("Total records read: " + total);
        System.out.println("Total rejected: " + errors);
    } finally {
        if (br != null) {
            br.close();
        }
        if (accepted != null) {
            accepted.close();
        }
        if (rejected != null) {
            rejected.close();
        }
    }
}
From source file:edu.cmu.lti.oaqa.knn4qa.apps.FilterTranTable.java
public static void main(String[] args) {
    Options options = new Options();

    options.addOption(INPUT_PARAM, null, true, INPUT_DESC);
    options.addOption(OUTPUT_PARAM, null, true, OUTPUT_DESC);
    options.addOption(CommonParams.MEM_FWD_INDEX_PARAM, null, true, CommonParams.MEM_FWD_INDEX_DESC);
    options.addOption(CommonParams.GIZA_ITER_QTY_PARAM, null, true, CommonParams.GIZA_ITER_QTY_PARAM);
    options.addOption(CommonParams.GIZA_ROOT_DIR_PARAM, null, true, CommonParams.GIZA_ROOT_DIR_PARAM);
    options.addOption(CommonParams.MIN_PROB_PARAM, null, true, CommonParams.MIN_PROB_DESC);
    options.addOption(CommonParams.MAX_WORD_QTY_PARAM, null, true, CommonParams.MAX_WORD_QTY_PARAM);

    CommandLineParser parser = new org.apache.commons.cli.GnuParser();

    try {
        CommandLine cmd = parser.parse(options, args);

        String outputFile = null;
        outputFile = cmd.getOptionValue(OUTPUT_PARAM);
        if (null == outputFile) {
            Usage("Specify 'A name of the output file'", options);
        }

        String gizaRootDir = cmd.getOptionValue(CommonParams.GIZA_ROOT_DIR_PARAM);
        if (null == gizaRootDir) {
            Usage("Specify '" + CommonParams.GIZA_ROOT_DIR_DESC + "'", options);
        }

        String gizaIterQty = cmd.getOptionValue(CommonParams.GIZA_ITER_QTY_PARAM);
        if (null == gizaIterQty) {
            Usage("Specify '" + CommonParams.GIZA_ITER_QTY_DESC + "'", options);
        }

        float minProb = 0;
        String tmpf = cmd.getOptionValue(CommonParams.MIN_PROB_PARAM);
        if (tmpf != null) {
            minProb = Float.parseFloat(tmpf);
        }

        int maxWordQty = Integer.MAX_VALUE;
        String tmpi = cmd.getOptionValue(CommonParams.MAX_WORD_QTY_PARAM);
        if (null != tmpi) {
            maxWordQty = Integer.parseInt(tmpi);
        }

        String memFwdIndxName = cmd.getOptionValue(CommonParams.MEM_FWD_INDEX_PARAM);
        if (null == memFwdIndxName) {
            Usage("Specify '" + CommonParams.MEM_FWD_INDEX_DESC + "'", options);
        }

        System.out.println("Filtering index: " + memFwdIndxName + " max # of frequent words: " + maxWordQty
                + " min. probability:" + minProb);

        VocabularyFilterAndRecoder filter = new FrequentIndexWordFilterAndRecoder(memFwdIndxName, maxWordQty);

        String srcVocFile = CompressUtils.findFileVariant(gizaRootDir + "/source.vcb");
        System.out.println("Source vocabulary file: " + srcVocFile);
        GizaVocabularyReader srcVoc = new GizaVocabularyReader(srcVocFile, filter);

        String dstVocFile = CompressUtils.findFileVariant(gizaRootDir + "/target.vcb");
        System.out.println("Target vocabulary file: " + dstVocFile);
        GizaVocabularyReader dstVoc = new GizaVocabularyReader(CompressUtils.findFileVariant(dstVocFile), filter);

        String inputFile = CompressUtils.findFileVariant(gizaRootDir + "/output.t1." + gizaIterQty);

        BufferedReader finp = new BufferedReader(
                new InputStreamReader(CompressUtils.createInputStream(inputFile)));
        BufferedWriter fout = new BufferedWriter(
                new OutputStreamWriter(CompressUtils.createOutputStream(outputFile)));

        try {
            String line;
            int prevSrcId = -1;
            int wordQty = 0;
            long addedQty = 0;
            long totalQty = 0;
            boolean isNotFiltered = false;

            for (totalQty = 0; (line = finp.readLine()) != null;) {
                ++totalQty;
                // Skip empty lines
                line = line.trim();
                if (line.isEmpty())
                    continue;

                GizaTranRec rec = new GizaTranRec(line);
                if (rec.mSrcId != prevSrcId) {
                    ++wordQty;
                }
                if (totalQty % REPORT_INTERVAL_QTY == 0) {
                    System.out.println(String.format(
                            "Processed %d lines (%d source word entries) from '%s', added %d lines", totalQty,
                            wordQty, inputFile, addedQty));
                }

                // isNotFiltered should be set after procOneWord
                if (rec.mSrcId != prevSrcId) {
                    if (rec.mSrcId == 0)
                        isNotFiltered = true;
                    else {
                        String wordSrc = srcVoc.getWord(rec.mSrcId);
                        isNotFiltered = filter == null || (wordSrc != null && filter.checkWord(wordSrc));
                    }
                }

                prevSrcId = rec.mSrcId;

                if (rec.mProb >= minProb && isNotFiltered) {
                    String wordDst = dstVoc.getWord(rec.mDstId);
                    if (filter == null || (wordDst != null && filter.checkWord(wordDst))) {
                        fout.write(rec.mSrcId + " " + rec.mDstId + " " + rec.mProb);
                        fout.newLine();
                        addedQty++;
                    }
                }
            }

            System.out.println(
                    String.format("Processed %d lines (%d source word entries) from '%s', added %d lines",
                            totalQty, wordQty, inputFile, addedQty));
        } finally {
            finp.close();
            fout.close();
        }
    } catch (ParseException e) {
        Usage("Cannot parse arguments", options);
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println("Terminating due to an exception: " + e);
        System.exit(1);
    }
}
From source file:org.kuali.student.git.importer.ApplyManualBranchCleanup.java
/**
 * @param args
 */
public static void main(String[] args) {
    if (args.length < 4 || args.length > 7) {
        usage();
    }

    File inputFile = new File(args[0]);
    if (!inputFile.exists())
        usage();

    boolean bare = false;
    if (args[2].trim().equals("1")) {
        bare = true;
    }

    String remoteName = args[3].trim();

    String refPrefix = Constants.R_HEADS;
    if (args.length == 5)
        refPrefix = args[4].trim();

    String userName = null;
    String password = null;
    if (args.length == 6)
        userName = args[5].trim();
    if (args.length == 7)
        password = args[6].trim();

    try {
        Repository repo = GitRepositoryUtils.buildFileRepository(new File(args[1]).getAbsoluteFile(), false,
                bare);
        Git git = new Git(repo);
        RevWalk rw = new RevWalk(repo);
        ObjectInserter objectInserter = repo.newObjectInserter();

        BufferedReader fileReader = new BufferedReader(new FileReader(inputFile));
        String line = fileReader.readLine();
        int lineNumber = 1;

        BatchRefUpdate batch = repo.getRefDatabase().newBatchUpdate();
        List<RefSpec> branchesToDelete = new ArrayList<>();

        while (line != null) {
            if (line.startsWith("#") || line.length() == 0) {
                // Skip over comments and blank lines
                line = fileReader.readLine();
                lineNumber++;
                continue;
            }

            String parts[] = line.trim().split(":");
            String branchName = parts[0];
            Ref branchRef = repo.getRef(refPrefix + "/" + branchName);
            if (branchRef == null) {
                log.warn("line: {}, No branch matching {} exists, skipping.", lineNumber, branchName);
                line = fileReader.readLine();
                lineNumber++;
                continue;
            }

            String tagName = null;
            if (parts.length > 1)
                tagName = parts[1];

            if (tagName != null) {
                if (tagName.equals("keep")) {
                    log.info("keeping existing branch for {}", branchName);
                    line = fileReader.readLine();
                    lineNumber++;
                    continue;
                }
                if (tagName.equals("tag")) {
                    /*
                     * Shortcut to say make the tag start with the same name as the branch.
                     */
                    tagName = branchName;
                }
                // Create a tag
                RevCommit commit = rw.parseCommit(branchRef.getObjectId());
                ObjectId tag = GitRefUtils.insertTag(tagName, commit, objectInserter);
                batch.addCommand(new ReceiveCommand(null, tag, Constants.R_TAGS + tagName, Type.CREATE));
                log.info("converting branch {} into a tag {}", branchName, tagName);
            }

            if (remoteName.equals("local")) {
                batch.addCommand(
                        new ReceiveCommand(branchRef.getObjectId(), null, branchRef.getName(), Type.DELETE));
            } else {
                // If the branch is remote then remember its name so we can batch delete
                // after we have the full list.
                branchesToDelete.add(new RefSpec(":" + Constants.R_HEADS + branchName));
            }

            line = fileReader.readLine();
            lineNumber++;
        }
        fileReader.close();

        // Run the batch update
        batch.execute(rw, new TextProgressMonitor());

        if (!remoteName.equals("local")) {
            // Push the tags to the remote right now
            log.info("pushing tags to {}", remoteName);
            PushCommand pushCommand = git.push().setRemote(remoteName).setPushTags()
                    .setProgressMonitor(new TextProgressMonitor());
            if (userName != null)
                pushCommand.setCredentialsProvider(new UsernamePasswordCredentialsProvider(userName, password));
            Iterable<PushResult> results = pushCommand.call();
            for (PushResult pushResult : results) {
                if (!pushResult.equals(Result.NEW)) {
                    log.warn("failed to push tag " + pushResult.getMessages());
                }
            }

            // Delete the branches from the remote
            log.info("pushing branch deletes to remote: {}", remoteName);
            results = git.push().setRemote(remoteName).setRefSpecs(branchesToDelete)
                    .setProgressMonitor(new TextProgressMonitor()).call();
        }
        objectInserter.release();
        rw.release();
    } catch (Exception e) {
        log.error("unexpected Exception ", e);
    }
}
From source file:edu.mit.fss.examples.ISSFederate.java
/**
 * The main method. This configures the Orekit data path, creates the
 * ISS federate objects and launches the associated graphical user
 * interface.
 *
 * @param args the arguments
 * @throws RTIexception the RTI exception
 * @throws URISyntaxException
 */
public static void main(String[] args) throws RTIexception, URISyntaxException {
    BasicConfigurator.configure();

    boolean headless = false;

    logger.debug("Setting Orekit data path.");
    System.setProperty(DataProvidersManager.OREKIT_DATA_PATH,
            new File(ISSFederate.class.getResource("/orekit-data.zip").toURI()).getAbsolutePath());

    logger.trace("Creating federate instance.");
    final ISSFederate federate = new ISSFederate();

    logger.trace("Setting minimum step duration and time step.");
    long timeStep = 60 * 1000, minimumStepDuration = 100;
    federate.setMinimumStepDuration(minimumStepDuration);
    federate.setTimeStep(timeStep);

    try {
        logger.debug("Loading TLE data from file.");
        BufferedReader br = new BufferedReader(new InputStreamReader(
                federate.getClass().getClassLoader().getResourceAsStream("edu/mit/fss/examples/data.tle")));

        final SpaceSystem satellite;
        final SurfaceSystem station1, station2, station3;

        while (br.ready()) {
            if (br.readLine().matches(".*ISS.*")) {
                logger.debug("Found ISS data.");

                logger.trace("Adding FSS supplier space system.");
                satellite = new SpaceSystem("FSS Supplier", new TLE(br.readLine(), br.readLine()), 5123e3);
                federate.addObject(satellite);

                logger.trace("Adding Keio ground station.");
                station1 = new SurfaceSystem("Keio",
                        new GeodeticPoint(FastMath.toRadians(35.551929), FastMath.toRadians(139.647119), 300),
                        satellite.getState().getDate(), 5123e3, 5);
                federate.addObject(station1);

                logger.trace("Adding SkolTech ground station.");
                station2 = new SurfaceSystem("SkolTech",
                        new GeodeticPoint(FastMath.toRadians(55.698679), FastMath.toRadians(37.571994), 200),
                        satellite.getState().getDate(), 5123e3, 5);
                federate.addObject(station2);

                logger.trace("Adding MIT ground station.");
                station3 = new SurfaceSystem("MIT",
                        new GeodeticPoint(FastMath.toRadians(42.360184), FastMath.toRadians(-71.093742), 100),
                        satellite.getState().getDate(), 5123e3, 5);
                federate.addObject(station3);

                try {
                    logger.trace("Setting initial time.");
                    federate.setInitialTime(
                            satellite.getInitialState().getDate().toDate(TimeScalesFactory.getUTC()).getTime());
                } catch (IllegalArgumentException | OrekitException e) {
                    logger.error(e.getMessage());
                    e.printStackTrace();
                }

                if (!headless) {
                    logger.debug("Launching the graphical user interface.");
                    SwingUtilities.invokeAndWait(new Runnable() {
                        @Override
                        public void run() {
                            MemberFrame frame = new MemberFrame(federate,
                                    new MultiComponentPanel(Arrays.asList(
                                            new SpaceSystemPanel(federate, satellite),
                                            new SurfaceSystemPanel(federate, station1),
                                            new SurfaceSystemPanel(federate, station2),
                                            new SurfaceSystemPanel(federate, station3))));
                            frame.pack();
                            frame.setVisible(true);
                        }
                    });
                }
                break;
            }
        }
        br.close();
    } catch (InvocationTargetException | InterruptedException | OrekitException | IOException e) {
        e.printStackTrace();
        logger.fatal(e);
    }

    logger.trace("Setting federate name, type, and FOM path.");
    federate.getConnection().setFederateName("ISS");
    federate.getConnection().setFederateType("FSS Supplier");
    federate.getConnection().setFederationName("FSS");
    federate.getConnection().setFomPath(
            new File(federate.getClass().getClassLoader().getResource("edu/mit/fss/hla/fss.xml").toURI())
                    .getAbsolutePath());
    federate.getConnection().setOfflineMode(false);
    federate.connect();

    if (headless) {
        federate.setMinimumStepDuration(10);
        federate.initialize();
        federate.run();
    }
}