List of usage examples for java.lang.String.endsWith
public boolean endsWith(String suffix)
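endsWith performs an exact, case-sensitive comparison against the end of the string: it returns true when the string ends with the given suffix, always returns true for the empty suffix, and throws NullPointerException for a null suffix. A minimal sketch before the real-world examples below (the file name is invented for illustration):

public static void main(String[] args) {
    String report = "Consolidated_CSV_Report.csv"; // hypothetical file name
    System.out.println(report.endsWith(".csv")); // true
    System.out.println(report.endsWith(".CSV")); // false - the comparison is case-sensitive
    System.out.println(report.endsWith(""));     // true - the empty string is a suffix of every string
    // A common way to test an extension case-insensitively is to lower-case the name first:
    System.out.println(report.toLowerCase(java.util.Locale.ROOT).endsWith(".csv")); // true
}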
From source file:CSV_ReportingConsolidator.java
public static void main(String[] args) throws IOException {
    // Construct an array containing the list of files in the input folder
    String inputPath = "input/"; // Set the directory containing the CSV files
    String outputPath = "output/"; // Set the output directory for the consolidated report
    String outputFile = "Consolidated_CSV_Report.csv";
    File folder = new File(inputPath); // Load the selected path
    File[] listOfFiles = folder.listFiles(); // Retrieve the list of files from the directory

    // Serialize the reference headers to write the output CSV header
    CSVReader referenceReader = new CSVReader(new FileReader("reference/example_fields.csv"));
    String[] referenceHeaders = referenceReader.readNext();
    CSVWriter writer = new CSVWriter(new FileWriter(outputPath + outputFile), ',', CSVWriter.NO_QUOTE_CHARACTER);

    System.out.println("-- CSV parser initiated, found " + listOfFiles.length + " input files.\n");

    for (int i = 0; i < listOfFiles.length; i++) {
        if (listOfFiles[i].isFile()) {
            String filename = listOfFiles[i].getName(); // Retrieve the file name

            if (!filename.endsWith("csv")) { // Check if the file has a CSV extension
                System.out.println("EE | Fatal error: The input path contains non-csv files: " + filename
                        + ".\n Please remove them and try again.");
                writer.close();
                System.exit(1); // Exit if non-CSV files are found
            }

            String filePath = String.valueOf(inputPath + filename); // Combine the path with the filename
            File file = new File(filePath);
            CSVReader csvFile = new CSVReader(new FileReader(filePath));
            String[] nextLine; // CSV line data container
            int rowIterator = 0; // Used to loop between rows
            int colIterator = 0; // Used to loop between columns
            int rowCount = 0; // Used to count the total number of rows
            int pageCount = 0;
            int f = 0;
            String[] pageName = new String[100]; // Holder for Page names
            double[] individualPRT = new double[100]; // Holder for Page Response Times
            String PTrun = ""; // Name of Performance Test Run
            String startTime = ""; // Test start time
            double PRT = 0; // Average Page Response Time
            double PRd = 0; // Page Response Time Standard Deviation
            double ERT = 0; // Average Element Response Time
            double ERd = 0; // Element Response Time Standard Deviation
            double MRT = 0; // Maximum Page Response Time
            double mRT = 0; // Minimum Page Response Time
            int elapsedTime = 0; // Test Elapsed Time
            int completedUsers = 0; // Number of Completed Users
            int TPA = 0; // Total Page Attempts
            int TPH = 0; // Total Page Hits
            int TEA = 0; // Total Element Attempts
            int TEH = 0; // Total Element Hits

            // Fetch the total row count:
            FileReader fr = new FileReader(file);
            LineNumberReader ln = new LineNumberReader(fr);
            while (ln.readLine() != null) {
                rowCount++;
            }
            ln.close(); // Close the file reader

            // Fetch test identification data:
            nextLine = csvFile.readNext();
            PTrun = nextLine[1]; // Name of Performance Test Run
            nextLine = csvFile.readNext();
            startTime = nextLine[1]; // Performance Test Start Time

            // Skip 9 uninteresting rows:
            while (rowIterator < 9) {
                nextLine = csvFile.readNext();
                rowIterator++;
            }

            // Check if there are VP fails (adds another column)
            if (nextLine[9].equals("Total Page VPs Error For Run")) {
                f = 2;
            } else if (nextLine[8].equals("Total Page VPs Failed For Run")
                    || nextLine[8].equals("Total Page VPs Error For Run")) {
                f = 1;
            } else {
                f = 0;
            }

            // Read the page titles:
            while (colIterator != -1) {
                pageName[colIterator] = nextLine[colIterator + 18 + f];
                if ((pageName[colIterator].equals(pageName[0])) && colIterator > 0) {
                    pageCount = colIterator;
                    pageName[colIterator] = null;
                    colIterator = -1; // Detects when the page titles start to repeat
                } else {
                    colIterator++;
                }
            }

            // Retrieve non-continuous performance data, auto-detect gaps, auto-convert in seconds where needed
            nextLine = csvFile.readNext();
            nextLine = csvFile.readNext();
            while (rowIterator < rowCount - 3) {
                if (nextLine.length > 1 && nextLine[0].length() != 0)
                    elapsedTime = Integer.parseInt(nextLine[0]) / 1000;
                if (nextLine.length > 5 && nextLine[5].length() != 0)
                    completedUsers = Integer.parseInt(nextLine[5]);
                if (nextLine.length > 8 + f && nextLine[8 + f].length() != 0)
                    TPA = (int) Double.parseDouble(nextLine[8 + f]);
                if (nextLine.length > 9 + f && nextLine[9 + f].length() != 0)
                    TPH = (int) Double.parseDouble(nextLine[9 + f]);
                if (nextLine.length > 14 + f && nextLine[14 + f].length() != 0)
                    TEA = (int) Double.parseDouble(nextLine[14 + f]);
                if (nextLine.length > 15 + f && nextLine[15 + f].length() != 0)
                    TEH = (int) Double.parseDouble(nextLine[15 + f]);
                if (nextLine.length > 10 + f && nextLine[10 + f].length() != 0)
                    PRT = Double.parseDouble(nextLine[10 + f]) / 1000;
                if (nextLine.length > 11 + f && nextLine[11 + f].length() != 0)
                    PRd = Double.parseDouble(nextLine[11 + f]) / 1000;
                if (nextLine.length > 16 + f && nextLine[16 + f].length() != 0)
                    ERT = Double.parseDouble(nextLine[16 + f]) / 1000;
                if (nextLine.length > 17 + f && nextLine[17 + f].length() != 0)
                    ERd = Double.parseDouble(nextLine[17 + f]) / 1000;
                if (nextLine.length > 12 + f && nextLine[12 + f].length() != 0)
                    MRT = Double.parseDouble(nextLine[12 + f]) / 1000;
                if (nextLine.length > 13 + f && nextLine[13 + f].length() != 0)
                    mRT = Double.parseDouble(nextLine[13 + f]) / 1000;
                nextLine = csvFile.readNext();
                rowIterator++;
            }

            // Convert the elapsed time from seconds to HH:MM:SS format
            int hours = elapsedTime / 3600, remainder = elapsedTime % 3600, minutes = remainder / 60,
                    seconds = remainder % 60;
            String eTime = (hours < 10 ? "0" : "") + hours + ":" + (minutes < 10 ? "0" : "") + minutes + ":"
                    + (seconds < 10 ? "0" : "") + seconds;

            csvFile.close(); // File recycled to reset the line parser
            CSVReader csvFile2 = new CSVReader(new FileReader(filePath));

            // Reset iterators to allow re-usage:
            rowIterator = 0;
            colIterator = 0;

            // Skip first 13 rows:
            while (rowIterator < 13) {
                nextLine = csvFile2.readNext();
                rowIterator++;
            }

            // Dynamically retrieve individual page response times in seconds, correlate with page names:
            while (rowIterator < rowCount) {
                while (colIterator < pageCount) {
                    if (nextLine.length > 18 + f && nextLine[colIterator + 18 + f].length() != 0)
                        individualPRT[colIterator] = Double.parseDouble(nextLine[colIterator + 18 + f]) / 1000;
                    colIterator++;
                }
                nextLine = csvFile2.readNext();
                rowIterator++;
                colIterator = 0;
            }
            csvFile2.close(); // Final file closing

            // Reset iterators to allow re-usage:
            rowIterator = 0;
            colIterator = 0;

            // Display statistics in console, enable only for debugging purposes:
            /*
            System.out.println(" Elapsed Time: " + elapsedTime + "\n Completed Users: " + completedUsers
                    + "\n Total Page Attempts: " + TPA + "\n Total Page Hits: " + TPH
                    + "\n Average Response Time For All Pages For Run: " + PRT
                    + "\n Response Time Standard Deviation For All Pages For Run: " + PRd
                    + "\n Maximum Response Time For All Pages For Run: " + MRT
                    + "\n Minimum Response Time For All Pages For Run: " + mRT
                    + "\n Total Page Element Attempts: " + TEA + "\n Total Page Element Hits: " + TEH
                    + "\n Average Response Time For All Page Elements For Run: " + ERT
                    + "\n Response Time Standard Deviation For All Page Elements For Run: " + ERd + "\n");

            // Display individual page response times in console:
            while (colIterator < 9) {
                System.out.println("Page " + Page[colIterator] + " - Response Time: " + PagePRT[colIterator]);
                colIterator++;
            }
            */

            // Serialize individual Page Response Times into CSV values
            StringBuffer individualPRTList = new StringBuffer();
            if (individualPRT.length > 0) {
                individualPRTList.append(String.valueOf(individualPRT[0]));
                for (int k = 1; k < pageCount; k++) {
                    individualPRTList.append(",");
                    individualPRTList.append(String.valueOf(individualPRT[k]));
                }
            }

            // Serialize all retrieved performance parameters:
            String[] entries = { PTrun, startTime, String.valueOf(completedUsers), eTime, String.valueOf(TPA),
                    String.valueOf(TPH), String.valueOf(PRT), String.valueOf(PRd), String.valueOf(MRT),
                    String.valueOf(mRT), String.valueOf(TEA), String.valueOf(TEH), String.valueOf(ERT),
                    String.valueOf(ERd), "", individualPRTList.toString(), };

            // Define header and write it to the first CSV row
            Object[] headerConcatenator = ArrayUtils.addAll(referenceHeaders, pageName);
            String[] header = new String[referenceHeaders.length + pageCount];
            header = Arrays.copyOf(headerConcatenator, header.length, String[].class);

            if (i == 0) {
                writer.writeNext(header); // Write CSV header
            }
            writer.writeNext(entries); // Write performance parameters in CSV format
            System.out.println("== Processed: " + filename + " ===========================");
        }
    }

    writer.close(); // Close the CSV writer
    System.out.println("\n-- Done processing " + listOfFiles.length + " files."
            + "\n-- The consolidated report has been saved to " + outputPath + outputFile);
}
From source file:CSV_ReportingConsolidator.java
public static void main(String[] args) throws IOException {
    // Construct an array containing the list of files in the input folder
    String inputPath = "input/"; // Set the directory containing the CSV files
    String outputPath = "output/"; // Set the output directory for the consolidated report
    String outputFile = "Consolidated_CSV_Report.csv";
    File folder = new File(inputPath); // Load the selected path
    File[] listOfFiles = folder.listFiles(); // Retrieve the list of files from the directory

    // Serialize the reference headers to write the output CSV header
    CSVReader referenceReader = new CSVReader(new FileReader("reference/example_fields.csv"));
    String[] referenceHeaders = referenceReader.readNext();
    CSVWriter writer = new CSVWriter(new FileWriter(outputPath + outputFile), ',', CSVWriter.NO_QUOTE_CHARACTER);

    System.out.println("-- CSV parser initiated, found " + listOfFiles.length + " input files.\n");

    for (int i = 0; i < listOfFiles.length; i++) {
        if (listOfFiles[i].isFile()) {
            String filename = listOfFiles[i].getName(); // Retrieve the file name

            if (!filename.endsWith("csv")) { // Check if the file has a CSV extension
                System.out.println("EE | Fatal error: The input path contains non-csv files: " + filename
                        + ".\n Please remove them and try again.");
                writer.close();
                System.exit(1); // Exit if non-CSV files are found
            }

            String filePath = String.valueOf(inputPath + filename); // Combine the path with the filename
            File file = new File(filePath);
            CSVReader csvFile = new CSVReader(new FileReader(filePath));
            String[] nextLine; // CSV line data container
            int rowIterator = 0; // Used to loop between rows
            int colIterator = 0; // Used to loop between columns
            int rowCount = 0; // Used to count the total number of rows
            int pageCount = 0;
            int f = 0;
            String[] pageName = new String[100]; // Holder for Page names
            double[] individualPRT = new double[100]; // Holder for Page Response Times
            String PTrun = ""; // Name of Performance Test Run
            String startTime = ""; // Test start time
            double PRT = 0; // Average Page Response Time
            double PRd = 0; // Page Response Time Standard Deviation
            double ERT = 0; // Average Element Response Time
            double ERd = 0; // Element Response Time Standard Deviation
            double MRT = 0; // Maximum Page Response Time
            double mRT = 0; // Minimum Page Response Time
            int elapsedTime = 0; // Test Elapsed Time
            int completedUsers = 0; // Number of Completed Users
            int TPA = 0; // Total Page Attempts
            int TPH = 0; // Total Page Hits
            int TEA = 0; // Total Element Attempts
            int TEH = 0; // Total Element Hits

            // Fetch the total row count:
            FileReader fr = new FileReader(file);
            LineNumberReader ln = new LineNumberReader(fr);
            while (ln.readLine() != null) {
                rowCount++;
            }
            ln.close(); // Close the file reader

            // Fetch test identification data:
            nextLine = csvFile.readNext();
            PTrun = nextLine[1]; // Name of Performance Test Run
            nextLine = csvFile.readNext();
            startTime = nextLine[1]; // Performance Test Start Time

            // Skip 9 uninteresting rows:
            while (rowIterator < 9) {
                nextLine = csvFile.readNext();
                rowIterator++;
            }

            // Check if there are VP fails (adds another column)
            if (nextLine[9].equals("Total Page VPs Error For Run")) {
                f = 2;
            } else if (nextLine[8].equals("Total Page VPs Failed For Run")
                    || nextLine[8].equals("Total Page VPs Error For Run")) {
                f = 1;
            } else {
                f = 0;
            }

            // Read the page titles:
            while (colIterator != -1) {
                pageName[colIterator] = nextLine[colIterator + 16 + f];
                if ((pageName[colIterator].equals(pageName[0])) && colIterator > 0) {
                    pageCount = colIterator;
                    pageName[colIterator] = null;
                    colIterator = -1; // Detects when the page titles start to repeat
                } else {
                    colIterator++;
                }
            }

            // Retrieve non-continuous performance data, auto-detect gaps, auto-convert in seconds where needed
            nextLine = csvFile.readNext();
            nextLine = csvFile.readNext();
            while (rowIterator < rowCount - 3) {
                if (nextLine.length > 1 && nextLine[0].length() != 0)
                    elapsedTime = Integer.parseInt(nextLine[0]) / 1000;
                if (nextLine.length > 4 && nextLine[4].length() != 0)
                    completedUsers = Integer.parseInt(nextLine[4]);
                if (nextLine.length > 6 + f && nextLine[6 + f].length() != 0)
                    TPA = (int) Double.parseDouble(nextLine[6 + f]);
                if (nextLine.length > 7 + f && nextLine[7 + f].length() != 0)
                    TPH = (int) Double.parseDouble(nextLine[7 + f]);
                if (nextLine.length > 12 + f && nextLine[12 + f].length() != 0)
                    TEA = (int) Double.parseDouble(nextLine[12 + f]);
                if (nextLine.length > 13 + f && nextLine[13 + f].length() != 0)
                    TEH = (int) Double.parseDouble(nextLine[13 + f]);
                if (nextLine.length > 8 + f && nextLine[8 + f].length() != 0)
                    PRT = Double.parseDouble(nextLine[8 + f]) / 1000;
                if (nextLine.length > 9 + f && nextLine[9 + f].length() != 0)
                    PRd = Double.parseDouble(nextLine[9 + f]) / 1000;
                if (nextLine.length > 14 + f && nextLine[14 + f].length() != 0)
                    ERT = Double.parseDouble(nextLine[14 + f]) / 1000;
                if (nextLine.length > 15 + f && nextLine[15 + f].length() != 0)
                    ERd = Double.parseDouble(nextLine[15 + f]) / 1000;
                if (nextLine.length > 10 + f && nextLine[10 + f].length() != 0)
                    MRT = Double.parseDouble(nextLine[10 + f]) / 1000;
                if (nextLine.length > 11 + f && nextLine[11 + f].length() != 0)
                    mRT = Double.parseDouble(nextLine[11 + f]) / 1000;
                nextLine = csvFile.readNext();
                rowIterator++;
            }

            // Convert the elapsed time from seconds to HH:MM:SS format
            int hours = elapsedTime / 3600, remainder = elapsedTime % 3600, minutes = remainder / 60,
                    seconds = remainder % 60;
            String eTime = (hours < 10 ? "0" : "") + hours + ":" + (minutes < 10 ? "0" : "") + minutes + ":"
                    + (seconds < 10 ? "0" : "") + seconds;

            csvFile.close(); // File recycled to reset the line parser
            CSVReader csvFile2 = new CSVReader(new FileReader(filePath));

            // Reset iterators to allow re-usage:
            rowIterator = 0;
            colIterator = 0;

            // Skip first 13 rows:
            while (rowIterator < 13) {
                nextLine = csvFile2.readNext();
                rowIterator++;
            }

            // Dynamically retrieve individual page response times in seconds, correlate with page names:
            while (rowIterator < rowCount) {
                while (colIterator < pageCount) {
                    if (nextLine.length > 16 + f && nextLine[colIterator + 16 + f].length() != 0)
                        individualPRT[colIterator] = Double.parseDouble(nextLine[colIterator + 16 + f]) / 1000;
                    colIterator++;
                }
                nextLine = csvFile2.readNext();
                rowIterator++;
                colIterator = 0;
            }
            csvFile2.close(); // Final file closing

            // Reset iterators to allow re-usage:
            rowIterator = 0;
            colIterator = 0;

            // Display statistics in console, enable only for debugging purposes:
            /*
            System.out.println(" Elapsed Time: " + elapsedTime + "\n Completed Users: " + completedUsers
                    + "\n Total Page Attempts: " + TPA + "\n Total Page Hits: " + TPH
                    + "\n Average Response Time For All Pages For Run: " + PRT
                    + "\n Response Time Standard Deviation For All Pages For Run: " + PRd
                    + "\n Maximum Response Time For All Pages For Run: " + MRT
                    + "\n Minimum Response Time For All Pages For Run: " + mRT
                    + "\n Total Page Element Attempts: " + TEA + "\n Total Page Element Hits: " + TEH
                    + "\n Average Response Time For All Page Elements For Run: " + ERT
                    + "\n Response Time Standard Deviation For All Page Elements For Run: " + ERd + "\n");

            // Display individual page response times in console:
            while (colIterator < 9) {
                System.out.println("Page " + Page[colIterator] + " - Response Time: " + PagePRT[colIterator]);
                colIterator++;
            }
            */

            // Serialize individual Page Response Times into CSV values
            StringBuffer individualPRTList = new StringBuffer();
            if (individualPRT.length > 0) {
                individualPRTList.append(String.valueOf(individualPRT[0]));
                for (int k = 1; k < pageCount; k++) {
                    individualPRTList.append(",");
                    individualPRTList.append(String.valueOf(individualPRT[k]));
                }
            }

            // Serialize all retrieved performance parameters:
            String[] entries = { PTrun, startTime, String.valueOf(completedUsers), eTime, String.valueOf(TPA),
                    String.valueOf(TPH), String.valueOf(PRT), String.valueOf(PRd), String.valueOf(MRT),
                    String.valueOf(mRT), String.valueOf(TEA), String.valueOf(TEH), String.valueOf(ERT),
                    String.valueOf(ERd), "", individualPRTList.toString(), };

            // Define header and write it to the first CSV row
            Object[] headerConcatenator = ArrayUtils.addAll(referenceHeaders, pageName);
            String[] header = new String[referenceHeaders.length + pageCount];
            header = Arrays.copyOf(headerConcatenator, header.length, String[].class);

            if (i == 0) {
                writer.writeNext(header); // Write CSV header
            }
            writer.writeNext(entries); // Write performance parameters in CSV format
            System.out.println("== Processed: " + filename + " ===========================");
        }
    }

    writer.close(); // Close the CSV writer
    System.out.println("\n-- Done processing " + listOfFiles.length + " files."
            + "\n-- The consolidated report has been saved to " + outputPath + outputFile);
}
From source file:com.wordnik.swagger.testframework.APITestRunner.java
/**
 * Follow the following argument pattern
 *
 * Arg[0] --> api server URL
 * Arg[1] --> api key
 * Arg[2] --> test script file path
 * Arg[3] --> test data file path
 * Arg[4] --> test data class name (class to which test data file will be deserialized)
 * Arg[5] --> package where API classes are available
 * Arg[6] --> Language to execute test cases
 * Arg[7] --> Library location
 * Arg[8] --> Optional test cases id. provide this if you need to execute only one test case
 *
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
    String apiServer = args[0];
    if (!apiServer.endsWith("/")) {
        apiServer = apiServer + "/";
    }
    String apiKey = args[1];
    String testScriptLocation = args[2];
    String testDataLocation = args[3];
    String testDataClass = args[4].trim();
    System.out.println("class" + testDataClass + "test");
    String apiPackageName = args[5];
    String libraryLocation = args[6];
    String language = args[7];
    String suiteId = "0";
    if (args.length > 8) {
        suiteId = args[8];
    }

    ApiKeyAuthTokenBasedSecurityHandler securityHandler = new ApiKeyAuthTokenBasedSecurityHandler(apiKey, "");
    APIInvoker aAPIInvoker = APIInvoker.initialize(securityHandler, apiServer, true);

    APITestRunner runner = new APITestRunner();
    runner.initialize(testScriptLocation, testDataLocation, testDataClass);
    runner.runTests(apiServer, apiPackageName, runner.getTestPackage(), language, new Integer(suiteId),
            apiPackageName, securityHandler, libraryLocation);
}
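The trailing-slash normalization above (append "/" only when endsWith reports it missing) recurs in several of the examples below for log directories, data directories and file separators. A hypothetical helper that captures the idiom (ensureSuffix is an assumption, not part of any of these sources):

// Appends the suffix only when the value does not already end with it.
static String ensureSuffix(String value, String suffix) {
    return value.endsWith(suffix) ? value : value + suffix;
}

// e.g. ensureSuffix("output", "/") -> "output/"; ensureSuffix("output/", "/") -> "output/"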
From source file:com.aestel.chemistry.openEye.fp.Fingerprinter.java
public static void main(String... args) throws IOException {
    long start = System.currentTimeMillis();
    long iCounter = 0;

    // create command line Options object
    Options options = new Options();
    Option opt = new Option("in", true, "input file [.ism,.sdf,...]");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("out", true, "output file .tsv or oe-supported");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("idTag", true, "field with ID (default title)");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option("fpType", true,
            "fingerPrintType: maccs|linear7|linear7*4|HashLinear7*4\n"
                    + " maccs: generate maccs keys\n"
                    + " linear7 generate 7 bonds long linear fingerprints (210k known rest hashed)\n"
                    + " linear7*4 linear 7 bonds if more than 4 atoms code atoms as * (5.1k known rest hashed)\n"
                    + " HashLinear7*4: as linear7*4 but hashed to 16k");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("format", true,
            "folded512|folded2048|bitList|fragList|none\n"
                    + " folded512/2048: hex encoded 512/2048 bits\n"
                    + " bitList: list of bitpositions\n"
                    + " none: no fp output for use with writeCodeMap");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("writeCodeMap", false, "Overwrite the codeMap file at the end of processing");
    opt.setRequired(false);
    options.addOption(opt);

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        exitWithHelp(options);
    }
    args = cmd.getArgs();

    if (cmd.hasOption("d")) {
        System.err.println("Start debugger and press return:");
        new BufferedReader(new InputStreamReader(System.in)).readLine();
    }

    String idTag = null;
    if (cmd.hasOption("idTag"))
        idTag = cmd.getOptionValue("idTag");

    String outformat = cmd.getOptionValue("format").toLowerCase().intern();

    if (args.length != 0) {
        exitWithHelp(options);
    }

    String type = cmd.getOptionValue("fpType");
    boolean updateDictionaryFile = cmd.hasOption("writeCodeMap");

    boolean hashUnknownFrag = true;
    if (type.equals("HashLinear7*4"))
        hashUnknownFrag = false;
    if (type.equals("maccs"))
        hashUnknownFrag = false;
    if (updateDictionaryFile)
        hashUnknownFrag = false;

    Fingerprinter fprinter = createFingerprinter(type, updateDictionaryFile, hashUnknownFrag);
    OEMolBase mol = new OEGraphMol();

    String inFile = cmd.getOptionValue("in");
    String outFile = cmd.getOptionValue("out");

    oemolistream ifs = new oemolistream(inFile);
    Runtime rt = Runtime.getRuntime();

    Outputter out;
    if (outFile.endsWith(".txt") || outFile.endsWith(".tab"))
        out = new TabOutputter(fprinter.getMapper(), outFile, outformat);
    else
        out = new OEOutputter(fprinter.getMapper(), outFile, type, outformat);

    while (oechem.OEReadMolecule(ifs, mol)) {
        iCounter++;
        Fingerprint fp = fprinter.getFingerprint(mol);

        String id;
        if (idTag == null)
            id = mol.GetTitle();
        else
            id = oechem.OEGetSDData(mol, idTag);

        if (iCounter % 100 == 0)
            System.err.print(".");
        if (iCounter % 4000 == 0) {
            System.err.printf(" %d %dsec\tt=%d f=%d u=%d m=%d tf=%d\n", iCounter,
                    (System.currentTimeMillis() - start) / 1000, rt.totalMemory() / 1024,
                    rt.freeMemory() / 1024, (rt.totalMemory() - rt.freeMemory()) / 1024, rt.maxMemory() / 1024,
                    (rt.freeMemory() + (rt.maxMemory() - rt.totalMemory())) / 1024);
        }

        out.output(id, mol, fp);
    }

    System.err.printf("Fingerprinter: Read %d structures in %d sec\n", iCounter,
            (System.currentTimeMillis() - start) / 1000);

    if (updateDictionaryFile)
        fprinter.writeDictionary();

    out.close();
    fprinter.close();
}
From source file:gov.nasa.jpl.mudrod.main.MudrodEngine.java
/**
 * Main program invocation. Accepts one argument denoting location (on disk)
 * to a log file which is to be ingested. Help will be provided if invoked
 * with incorrect parameters.
 *
 * @param args
 *          {@link java.lang.String} array containing correct parameters.
 */
public static void main(String[] args) {
    // boolean options
    Option helpOpt = new Option("h", "help", false, "show this help message");
    // log ingest (preprocessing + processing)
    Option logIngestOpt = new Option("l", LOG_INGEST, false, "begin log ingest");
    // metadata ingest (preprocessing + processing)
    Option metaIngestOpt = new Option("m", META_INGEST, false, "begin metadata ingest");
    // ingest both log and metadata
    Option fullIngestOpt = new Option("f", FULL_INGEST, false, "begin full ingest Mudrod workflow");
    // processing only, assuming that preprocessing results is in dataDir
    Option processingOpt = new Option("p", PROCESSING, false, "begin processing with preprocessing results");

    // argument options
    Option dataDirOpt = OptionBuilder.hasArg(true).withArgName("/path/to/data/directory").hasArgs(1)
            .withDescription("the data directory to be processed by Mudrod").withLongOpt("dataDirectory")
            .isRequired().create(DATA_DIR);
    Option esHostOpt = OptionBuilder.hasArg(true).withArgName("host_name").hasArgs(1)
            .withDescription("elasticsearch cluster unicast host").withLongOpt("elasticSearchHost")
            .isRequired(false).create(ES_HOST);
    Option esTCPPortOpt = OptionBuilder.hasArg(true).withArgName("port_num").hasArgs(1)
            .withDescription("elasticsearch transport TCP port").withLongOpt("elasticSearchTransportTCPPort")
            .isRequired(false).create(ES_TCP_PORT);
    Option esPortOpt = OptionBuilder.hasArg(true).withArgName("port_num").hasArgs(1)
            .withDescription("elasticsearch HTTP/REST port").withLongOpt("elasticSearchHTTPPort")
            .isRequired(false).create(ES_HTTP_PORT);

    // create the options
    Options options = new Options();
    options.addOption(helpOpt);
    options.addOption(logIngestOpt);
    options.addOption(metaIngestOpt);
    options.addOption(fullIngestOpt);
    options.addOption(processingOpt);
    options.addOption(dataDirOpt);
    options.addOption(esHostOpt);
    options.addOption(esTCPPortOpt);
    options.addOption(esPortOpt);

    CommandLineParser parser = new GnuParser();
    try {
        CommandLine line = parser.parse(options, args);
        String processingType = null;

        if (line.hasOption(LOG_INGEST)) {
            processingType = LOG_INGEST;
        } else if (line.hasOption(PROCESSING)) {
            processingType = PROCESSING;
        } else if (line.hasOption(META_INGEST)) {
            processingType = META_INGEST;
        } else if (line.hasOption(FULL_INGEST)) {
            processingType = FULL_INGEST;
        }

        String dataDir = line.getOptionValue(DATA_DIR).replace("\\", "/");
        if (!dataDir.endsWith("/")) {
            dataDir += "/";
        }

        MudrodEngine me = new MudrodEngine();
        me.loadConfig();
        me.props.put(DATA_DIR, dataDir);

        if (line.hasOption(ES_HOST)) {
            String esHost = line.getOptionValue(ES_HOST);
            me.props.put(MudrodConstants.ES_UNICAST_HOSTS, esHost);
        }
        if (line.hasOption(ES_TCP_PORT)) {
            String esTcpPort = line.getOptionValue(ES_TCP_PORT);
            me.props.put(MudrodConstants.ES_TRANSPORT_TCP_PORT, esTcpPort);
        }
        if (line.hasOption(ES_HTTP_PORT)) {
            String esHttpPort = line.getOptionValue(ES_HTTP_PORT);
            me.props.put(MudrodConstants.ES_HTTP_PORT, esHttpPort);
        }

        me.es = new ESDriver(me.getConfig());
        me.spark = new SparkDriver(me.getConfig());
        loadFullConfig(me, dataDir);

        if (processingType != null) {
            switch (processingType) {
            case PROCESSING:
                me.startProcessing();
                break;
            case LOG_INGEST:
                me.startLogIngest();
                break;
            case META_INGEST:
                me.startMetaIngest();
                break;
            case FULL_INGEST:
                me.startFullIngest();
                break;
            default:
                break;
            }
        }
        me.end();
    } catch (Exception e) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp(
                "MudrodEngine: 'dataDir' argument is mandatory. " + "User must also provide an ingest method.",
                options, true);
        LOG.error("Error whilst parsing command line.", e);
    }
}
From source file:ktdiedrich.imagek.SegmentationCMD.java
/**
 * @author ktdiedrich@gmail.com
 * @param args: [file path] [Median filter size] [imageId]
 * Command line segmentation
 * @throws org.apache.commons.cli.ParseException
 */
public static void main(String[] args) throws org.apache.commons.cli.ParseException {
    int imageIds[] = null;
    int medianFilterSize = 0;
    float seed = Extractor3D.SEED_HIST_THRES;
    String paths[] = null;

    Options options = new Options();
    options.addOption("p", true, "path name to file including filename");
    options.addOption("m", true, "median filter size, m*2+1");
    options.addOption("i", true, "image ID");
    options.addOption("f", true, "Image ID from");
    options.addOption("t", true, "Image ID to, (inclusive)");
    options.addOption("s", true, "Seed threshold default " + seed);

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = parser.parse(options, args);
    if (cmd.hasOption("s")) {
        seed = Float.parseFloat(cmd.getOptionValue("s"));
    }
    if (cmd.hasOption("i")) {
        imageIds = new int[1];
        imageIds[0] = Integer.parseInt(cmd.getOptionValue("i"));
        paths = new String[1];
        paths[0] = getImageIdPath(imageIds[0]); // TODO get path to image ID from database and properties file.
    }
    if (cmd.hasOption("f") && cmd.hasOption("t")) {
        int from = Integer.parseInt(cmd.getOptionValue("f"));
        int to = Integer.parseInt(cmd.getOptionValue("t"));
        int range = to - from + 1;
        paths = new String[range];
        imageIds = new int[range];
        for (int i = 0, imId = from; i < range; i++, imId++) {
            imageIds[i] = imId;
            paths[i] = getImageIdPath(imId);
        }
    }
    if (paths == null && cmd.hasOption("p")) {
        paths = new String[1];
        paths[0] = cmd.getOptionValue("p");
    }
    if (cmd.hasOption("m")) {
        medianFilterSize = Integer.parseInt(cmd.getOptionValue("m"));
    }
    // System.out.println("ImageID: "+imageId+" Path: "+paths+" Median filter: "+medianFilterSize);

    if (paths != null) {
        int i = 0;
        for (String path : paths) {
            String p[] = parseDirectoryFileName(path);
            String dirPath = p[0];
            ImagePlus segImage = segment(path, medianFilterSize, imageIds[i], seed);

            String title = segImage.getShortTitle();
            if (title.contains(File.separator)) {
                title = parseDirectoryFileName(title)[1];
            }
            String outputPath = null;
            if (!dirPath.endsWith(File.separator))
                dirPath = dirPath + File.separator;
            outputPath = dirPath + title + ".zip";
            FileSaver fs = new FileSaver(segImage);
            fs.saveAsZip(outputPath);
            System.out.println("Saved: " + outputPath);

            ImagePlus mipYim = MIP.createShortMIP(segImage, MIP.Y_AXIS);
            fs = new FileSaver(mipYim);
            title = mipYim.getShortTitle();
            if (title.contains(File.separator)) {
                title = parseDirectoryFileName(title)[1];
            }
            outputPath = dirPath + title + ".png";
            fs.saveAsPng(outputPath);
            System.out.println("Saved: " + outputPath + "\n");
            i++;
        }
    }
}
From source file:de.bamamoto.mactools.png2icns.Scaler.java
public static void main(String[] args) {
    Options options = new Options();
    options.addOption("i", "input-filename", true,
            "Filename of the image containing the icon. The image should be a square with at least 1024x1024 pixels in PNG format.");
    options.addOption("o", "iconset-foldername", true,
            "Name of the folder where the iconset will be stored. The extension .iconset will be added automatically.");

    String folderName;
    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
        if (cmd.hasOption("i")) {
            if (new File(cmd.getOptionValue("i")).isFile()) {
                if (cmd.hasOption("o")) {
                    folderName = cmd.getOptionValue("o");
                } else {
                    folderName = "/tmp/noname.iconset";
                }
                if (!folderName.endsWith(".iconset")) {
                    folderName = folderName + ".iconset";
                }
                new File(folderName).mkdirs();

                BufferedImage source = ImageIO.read(new File(cmd.getOptionValue("i")));
                BufferedImage resized = resize(source, 1024, 1024);
                save(resized, folderName + "/icon_512x512@2x.png");

                resized = resize(source, 512, 512);
                save(resized, folderName + "/icon_512x512.png");
                save(resized, folderName + "/icon_256x256@2x.png");

                resized = resize(source, 256, 256);
                save(resized, folderName + "/icon_256x256.png");
                save(resized, folderName + "/icon_128x128@2x.png");

                resized = resize(source, 128, 128);
                save(resized, folderName + "/icon_128x128.png");

                resized = resize(source, 64, 64);
                save(resized, folderName + "/icon_32x32@2x.png");

                resized = resize(source, 32, 32);
                save(resized, folderName + "/icon_32x32.png");
                save(resized, folderName + "/icon_16x16@2x.png");

                resized = resize(source, 16, 16);
                save(resized, folderName + "/icon_16x16.png");

                Scaler.runProcess(new String[] { "/usr/bin/iconutil", "-c", "icns", folderName });
            }
        }
    } catch (IOException e) {
        System.out.println("Error reading image: " + cmd.getOptionValue("i"));
        e.printStackTrace();
    } catch (ParseException ex) {
        Logger.getLogger(Scaler.class.getName()).log(Level.SEVERE, null, ex);
    }
}
From source file:ireport_5_6_0.view.JasperDesignViewer.java
/**
 * @param args the command line arguments
 */
public static void main(String args[]) {
    String fileName = null;
    boolean isXMLFile = false;

    for (int i = 0; i < args.length; i++) {
        if (args[i].startsWith("-XML")) {
            isXMLFile = true;
        } else if (args[i].startsWith("-F")) {
            fileName = args[i].substring(2);
        } else {
            fileName = args[i];
        }
    }

    if (fileName == null) {
        usage();
        return;
    }

    if (!isXMLFile && fileName.endsWith(".jrxml")) {
        isXMLFile = true;
    }

    try {
        viewReportDesign(fileName, isXMLFile);
    } catch (JRException e) {
        if (log.isErrorEnabled()) {
            log.error("Error viewing report design.", e);
        }
        System.exit(1);
    }
}
From source file:com.finderbots.miner2.pinterest.PinterestCrawlAndMinerTool.java
public static void main(String[] args) {
    Options options = new Options();
    CmdLineParser parser = new CmdLineParser(options);

    try {
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        System.err.println(e.getMessage());
        printUsageAndExit(parser);
    }

    // Before we get too far along, see if the domain looks valid.
    String domain = options.getDomain();
    String urlsFile = options.getUrlsFile();
    if (domain != null) {
        validateDomain(domain, parser);
    } else {
        if (urlsFile == null) {
            System.err.println(
                    "Either a target domain should be specified or a file with a list of urls needs to be provided");
            printUsageAndExit(parser);
        }
    }

    if (domain != null && urlsFile != null) {
        System.out.println("Warning: Both domain and urls file list provided - using domain");
    }

    String outputDirName = options.getOutputDir();
    if (options.isDebugLogging()) {
        System.setProperty("bixo.root.level", "DEBUG");
    } else {
        System.setProperty("bixo.root.level", "INFO");
    }

    if (options.getLoggingAppender() != null) {
        // Set console vs. DRFA vs. something else
        System.setProperty("bixo.appender", options.getLoggingAppender());
    }

    String logsDir = options.getLogsDir();
    if (!logsDir.endsWith("/")) {
        logsDir = logsDir + "/";
    }

    try {
        JobConf conf = new JobConf();
        Path outputPath = new Path(outputDirName);
        FileSystem fs = outputPath.getFileSystem(conf);

        // First check if the user wants to clean
        if (options.isCleanOutputDir()) {
            if (fs.exists(outputPath)) {
                fs.delete(outputPath, true);
            }
        }

        // See if the user isn't starting from scratch then set up the
        // output directory and create an initial urls subdir.
        if (!fs.exists(outputPath)) {
            fs.mkdirs(outputPath);

            // Create a "0-<timestamp>" sub-directory with just a /crawldb subdir
            // In the /crawldb dir the input file will have a single URL for the target domain.
            Path curLoopDir = CrawlDirUtils.makeLoopDir(fs, outputPath, 0);
            String curLoopDirName = curLoopDir.getName();
            setLoopLoggerFile(logsDir + curLoopDirName, 0);
            Path crawlDbPath = new Path(curLoopDir, CrawlConfig.CRAWLDB_SUBDIR_NAME);

            if (domain != null) {
                importOneDomain(domain, crawlDbPath, conf);
            } else {
                importUrls(urlsFile, crawlDbPath);
            }
        }

        Path latestDirPath = CrawlDirUtils.findLatestLoopDir(fs, outputPath);
        if (latestDirPath == null) {
            System.err.println("No previous cycle output dirs exist in " + outputDirName);
            printUsageAndExit(parser);
        }

        Path crawlDbPath = new Path(latestDirPath, CrawlConfig.CRAWLDB_SUBDIR_NAME);

        // Set up the start and end loop counts.
        int startLoop = CrawlDirUtils.extractLoopNumber(latestDirPath);
        int endLoop = startLoop + options.getNumLoops();

        // Set up the UserAgent for the fetcher.
        UserAgent userAgent = new UserAgent(options.getAgentName(), CrawlConfig.EMAIL_ADDRESS,
                CrawlConfig.WEB_ADDRESS);

        // You also get to customize the FetcherPolicy
        FetcherPolicy defaultPolicy;
        if (options.getCrawlDuration() != 0) {
            defaultPolicy = new AdaptiveFetcherPolicy(options.getEndCrawlTime(), options.getCrawlDelay());
        } else {
            defaultPolicy = new FetcherPolicy();
        }
        defaultPolicy.setMaxContentSize(CrawlConfig.MAX_CONTENT_SIZE);
        defaultPolicy.setRequestTimeout(10L * 1000L); // 10 seconds

        // COMPLETE for crawling a single site, EFFICIENT for many sites
        if (options.getCrawlPolicy().equals(Options.IMPOLITE_CRAWL_POLICY)) {
            defaultPolicy.setFetcherMode(FetcherPolicy.FetcherMode.IMPOLITE);
        } else if (options.getCrawlPolicy().equals(Options.EFFICIENT_CRAWL_POLICY)) {
            defaultPolicy.setFetcherMode(FetcherPolicy.FetcherMode.EFFICIENT);
        } else if (options.getCrawlPolicy().equals(Options.COMPLETE_CRAWL_POLICY)) {
            defaultPolicy.setFetcherMode(FetcherPolicy.FetcherMode.COMPLETE);
        }

        // It is a good idea to set up a crawl duration when running long crawls as you may
        // end up in situations where the fetch slows down due to a 'long tail' and by
        // specifying a crawl duration you know exactly when the crawl will end.
        int crawlDurationInMinutes = options.getCrawlDuration();
        boolean hasEndTime = crawlDurationInMinutes != Options.NO_CRAWL_DURATION;
        long targetEndTime = hasEndTime
                ? System.currentTimeMillis() + (crawlDurationInMinutes * CrawlConfig.MILLISECONDS_PER_MINUTE)
                : FetcherPolicy.NO_CRAWL_END_TIME;

        // By setting up a url filter we only deal with urls that we want to
        // instead of all the urls that we extract.
        BaseUrlFilter urlFilter = null;
        List<String> patterns = null;
        String regexUrlFiltersFile = options.getRegexUrlFiltersFile();
        if (regexUrlFiltersFile != null) {
            patterns = RegexUrlDatumFilter.getUrlFilterPatterns(regexUrlFiltersFile);
        } else {
            patterns = RegexUrlDatumFilter.getDefaultUrlFilterPatterns();
            if (domain != null) {
                String domainPatterStr = "+(?i)^(http|https)://([a-z0-9]*\\.)*" + domain;
                patterns.add(domainPatterStr);
            } else {
                String protocolPatterStr = "+(?i)^(http|https)://*";
                patterns.add(protocolPatterStr);
                //Log.warn("Defaulting to basic url regex filtering (just suffix and protocol");
            }
        }
        urlFilter = new RegexUrlDatumFilter(patterns.toArray(new String[patterns.size()]));

        // get a list of patterns which tell the miner which URLs to include or exclude.
        patterns.clear();
        RegexUrlStringFilter urlsToMineFilter = null;
        String regexUrlsToMineFiltersFile = options.getRegexUrlToMineFile();
        AnalyzeHtml analyzer = null;
        if (regexUrlsToMineFiltersFile != null) {
            patterns = RegexUrlDatumFilter.getUrlFilterPatterns(regexUrlsToMineFiltersFile);
            urlsToMineFilter = new RegexUrlStringFilter(patterns.toArray(new String[patterns.size()]));
            analyzer = new AnalyzeHtml(urlsToMineFilter);
        }

        // OK, now we're ready to start looping, since we've got our current settings
        for (int curLoop = startLoop + 1; curLoop <= endLoop; curLoop++) {

            // Adjust target end time, if appropriate.
            if (hasEndTime) {
                int remainingLoops = (endLoop - curLoop) + 1;
                long now = System.currentTimeMillis();
                long perLoopTime = (targetEndTime - now) / remainingLoops;
                defaultPolicy.setCrawlEndTime(now + perLoopTime);
            }

            Path curLoopDirPath = CrawlDirUtils.makeLoopDir(fs, outputPath, curLoop);
            String curLoopDirName = curLoopDirPath.getName();
            setLoopLoggerFile(logsDir + curLoopDirName, curLoop);

            Flow flow = PinterestCrawlAndMinerWorkflow.createFlow(curLoopDirPath, crawlDbPath, defaultPolicy,
                    userAgent, urlFilter, analyzer, options);
            flow.complete();

            // Writing out .dot files is a good way to verify your flows.
            flow.writeDOT("valid-flow.dot");

            // Update crawlDbPath to point to the latest crawl db
            crawlDbPath = new Path(curLoopDirPath, CrawlConfig.CRAWLDB_SUBDIR_NAME);
        }
    } catch (PlannerException e) {
        e.writeDOT("failed-flow.dot");
        System.err.println("PlannerException: " + e.getMessage());
        e.printStackTrace(System.err);
        System.exit(-1);
    } catch (Throwable t) {
        System.err.println("Exception running tool: " + t.getMessage());
        t.printStackTrace(System.err);
        System.exit(-1);
    }
}
From source file:com.finderbots.miner2.tomatoes.RTCriticsCrawlAndMinerTool.java
public static void main(String[] args) {
    Options options = new Options();
    CmdLineParser parser = new CmdLineParser(options);

    try {
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        System.err.println(e.getMessage());
        printUsageAndExit(parser);
    }

    // Before we get too far along, see if the domain looks valid.
    String domain = options.getDomain();
    String urlsFile = options.getUrlsFile();
    if (domain != null) {
        validateDomain(domain, parser);
    } else {
        if (urlsFile == null) {
            System.err.println(
                    "Either a target domain should be specified or a file with a list of urls needs to be provided");
            printUsageAndExit(parser);
        }
    }

    if (domain != null && urlsFile != null) {
        System.out.println("Warning: Both domain and urls file list provided - using domain");
    }

    String outputDirName = options.getOutputDir();
    if (options.isDebugLogging()) {
        System.setProperty("bixo.root.level", "DEBUG");
    } else {
        System.setProperty("bixo.root.level", "INFO");
    }

    if (options.getLoggingAppender() != null) {
        // Set console vs. DRFA vs. something else
        System.setProperty("bixo.appender", options.getLoggingAppender());
    }

    String logsDir = options.getLogsDir();
    if (!logsDir.endsWith("/")) {
        logsDir = logsDir + "/";
    }

    try {
        JobConf conf = new JobConf();
        Path outputPath = new Path(outputDirName);
        FileSystem fs = outputPath.getFileSystem(conf);

        // First check if the user wants to clean
        if (options.isCleanOutputDir()) {
            if (fs.exists(outputPath)) {
                fs.delete(outputPath, true);
            }
        }

        // See if the user isn't starting from scratch then set up the
        // output directory and create an initial urls subdir.
        if (!fs.exists(outputPath)) {
            fs.mkdirs(outputPath);

            // Create a "0-<timestamp>" sub-directory with just a /crawldb subdir
            // In the /crawldb dir the input file will have a single URL for the target domain.
            Path curLoopDir = CrawlDirUtils.makeLoopDir(fs, outputPath, 0);
            String curLoopDirName = curLoopDir.getName();
            setLoopLoggerFile(logsDir + curLoopDirName, 0);
            Path crawlDbPath = new Path(curLoopDir, CrawlConfig.CRAWLDB_SUBDIR_NAME);

            if (domain != null) {
                importOneDomain(domain, crawlDbPath, conf);
            } else {
                importUrls(urlsFile, crawlDbPath);
            }
        }

        Path latestDirPath = CrawlDirUtils.findLatestLoopDir(fs, outputPath);
        if (latestDirPath == null) {
            System.err.println("No previous cycle output dirs exist in " + outputDirName);
            printUsageAndExit(parser);
        }

        Path crawlDbPath = new Path(latestDirPath, CrawlConfig.CRAWLDB_SUBDIR_NAME);

        // Set up the start and end loop counts.
        int startLoop = CrawlDirUtils.extractLoopNumber(latestDirPath);
        int endLoop = startLoop + options.getNumLoops();

        // Set up the UserAgent for the fetcher.
        UserAgent userAgent = new UserAgent(options.getAgentName(), CrawlConfig.EMAIL_ADDRESS,
                CrawlConfig.WEB_ADDRESS);

        // You also get to customize the FetcherPolicy
        FetcherPolicy defaultPolicy;
        if (options.getCrawlDuration() != 0) {
            defaultPolicy = new AdaptiveFetcherPolicy(options.getEndCrawlTime(), options.getCrawlDelay());
        } else {
            defaultPolicy = new FetcherPolicy();
        }
        defaultPolicy.setMaxContentSize(CrawlConfig.MAX_CONTENT_SIZE);
        defaultPolicy.setRequestTimeout(10L * 1000L); // 10 seconds

        // COMPLETE for crawling a single site, EFFICIENT for many sites
        if (options.getCrawlPolicy().equals(Options.IMPOLITE_CRAWL_POLICY)) {
            defaultPolicy.setFetcherMode(FetcherPolicy.FetcherMode.IMPOLITE);
        } else if (options.getCrawlPolicy().equals(Options.EFFICIENT_CRAWL_POLICY)) {
            defaultPolicy.setFetcherMode(FetcherPolicy.FetcherMode.EFFICIENT);
        } else if (options.getCrawlPolicy().equals(Options.COMPLETE_CRAWL_POLICY)) {
            defaultPolicy.setFetcherMode(FetcherPolicy.FetcherMode.COMPLETE);
        }

        // It is a good idea to set up a crawl duration when running long crawls as you may
        // end up in situations where the fetch slows down due to a 'long tail' and by
        // specifying a crawl duration you know exactly when the crawl will end.
        int crawlDurationInMinutes = options.getCrawlDuration();
        boolean hasEndTime = crawlDurationInMinutes != Options.NO_CRAWL_DURATION;
        long targetEndTime = hasEndTime
                ? System.currentTimeMillis() + (crawlDurationInMinutes * CrawlConfig.MILLISECONDS_PER_MINUTE)
                : FetcherPolicy.NO_CRAWL_END_TIME;

        // By setting up a url filter we only deal with urls that we want to
        // instead of all the urls that we extract.
        BaseUrlFilter urlFilter = null;
        List<String> patterns = null;
        String regexUrlFiltersFile = options.getRegexUrlFiltersFile();
        if (regexUrlFiltersFile != null) {
            patterns = RegexUrlDatumFilter.getUrlFilterPatterns(regexUrlFiltersFile);
        } else {
            patterns = RegexUrlDatumFilter.getDefaultUrlFilterPatterns();
            if (domain != null) {
                String domainPatterStr = "+(?i)^(http|https)://([a-z0-9]*\\.)*" + domain;
                patterns.add(domainPatterStr);
            } else {
                String protocolPatterStr = "+(?i)^(http|https)://*";
                patterns.add(protocolPatterStr);
                //Log.warn("Defaulting to basic url regex filtering (just suffix and protocol");
            }
        }
        urlFilter = new RegexUrlDatumFilter(patterns.toArray(new String[patterns.size()]));

        // get a list of patterns which tell the miner which URLs to include or exclude.
        patterns.clear();
        RegexUrlStringFilter urlsToMineFilter = null;
        String regexUrlsToMineFiltersFile = options.getRegexUrlToMineFile();
        MineRTCriticsPreferences prefsAnalyzer = null;
        if (regexUrlsToMineFiltersFile != null) {
            patterns = RegexUrlDatumFilter.getUrlFilterPatterns(regexUrlsToMineFiltersFile);
            urlsToMineFilter = new RegexUrlStringFilter(patterns.toArray(new String[patterns.size()]));
            prefsAnalyzer = new MineRTCriticsPreferences(urlsToMineFilter);
        }

        // OK, now we're ready to start looping, since we've got our current settings
        for (int curLoop = startLoop + 1; curLoop <= endLoop; curLoop++) {

            // Adjust target end time, if appropriate.
            if (hasEndTime) {
                int remainingLoops = (endLoop - curLoop) + 1;
                long now = System.currentTimeMillis();
                long perLoopTime = (targetEndTime - now) / remainingLoops;
                defaultPolicy.setCrawlEndTime(now + perLoopTime);
            }

            Path curLoopDirPath = CrawlDirUtils.makeLoopDir(fs, outputPath, curLoop);
            String curLoopDirName = curLoopDirPath.getName();
            setLoopLoggerFile(logsDir + curLoopDirName, curLoop);

            Flow flow = RTCriticsCrawlAndMinerWorkflow.createFlow(curLoopDirPath, crawlDbPath, defaultPolicy,
                    userAgent, urlFilter, prefsAnalyzer, options);
            flow.complete();

            // Writing out .dot files is a good way to verify your flows.
            flow.writeDOT("valid-flow.dot");

            // Update crawlDbPath to point to the latest crawl db
            crawlDbPath = new Path(curLoopDirPath, CrawlConfig.CRAWLDB_SUBDIR_NAME);
        }
    } catch (PlannerException e) {
        e.writeDOT("failed-flow.dot");
        System.err.println("PlannerException: " + e.getMessage());
        e.printStackTrace(System.err);
        System.exit(-1);
    } catch (Throwable t) {
        System.err.println("Exception running tool: " + t.getMessage());
        t.printStackTrace(System.err);
        System.exit(-1);
    }
}
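Several of the tools above accept more than one valid suffix (for example the Fingerprinter writes tab-separated output for either ".txt" or ".tab"). When the list of acceptable endings grows, a small hypothetical helper keeps such checks readable (hasAnySuffix is an assumption, not taken from any of the sources above):

// Returns true when the name ends with any of the given suffixes.
static boolean hasAnySuffix(String name, String... suffixes) {
    for (String suffix : suffixes) {
        if (name.endsWith(suffix)) {
            return true;
        }
    }
    return false;
}

// e.g. hasAnySuffix("fingerprints.tab", ".txt", ".tab") -> true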