List of usage examples for java.io.File.isDirectory()
public boolean isDirectory()
Tests whether the file denoted by this abstract pathname exists and is a directory.
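Before the full examples, a minimal sketch of the typical check; the path here is a placeholder, pass any path as the first argument:

import java.io.File;

public class IsDirectoryDemo {
    public static void main(String[] args) {
        // Placeholder path; defaults to the current working directory.
        File f = new File(args.length > 0 ? args[0] : ".");
        if (f.isDirectory()) {
            // listFiles() can still return null if the directory vanishes
            // between the check and the call, so guard the result.
            File[] children = f.listFiles();
            System.out.println(f.getAbsolutePath() + " is a directory with "
                    + (children == null ? 0 : children.length) + " entries");
        } else {
            System.out.println(f.getAbsolutePath() + " is not a directory (or does not exist)");
        }
    }
}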
From source file:net.cloudkit.relaxation.CaptchaTest.java
public static void main(String[] args) {
    // System.out.println(Color.WHITE.getRGB());
    try {
        File fileDirectory = new File("D:\\customs\\");
        File[] files = fileDirectory.isDirectory() ? fileDirectory.listFiles() : new File[0];
        for (int a = 0; a < files.length; a++) {
            if (files[a].isDirectory()) {
                continue;
            }
            InputStream inputStream = new FileInputStream(files[a]);
            BufferedImage bi = ImageIO.read(inputStream);
            inputStream.close(); // close the stream once the image is read
            clearBorder(bi);
            clearNoise(bi);
            // List<BufferedImage> subImgs = splitImage(bi);
            // for (int i = 0; i < subImgs.size(); i++) {
            //     ImageIO.write(subImgs.get(i), "gif", new File("D:\\images\\" + i + ".gif"));
            // }
            String name = files[a].getName();
            int dot = name.indexOf('.');
            String baseName = (dot == -1) ? name : name.substring(0, dot); // guard names without an extension
            FileOutputStream fos = new FileOutputStream("D:\\images\\" + baseName + a + ".jpg");
            // The original used the non-public com.sun.image.codec.jpeg JPEGImageEncoder,
            // which was removed from the JDK; ImageIO.write is the portable equivalent.
            ImageIO.write(bi, "jpg", fos);
            fos.close();
        }
        /*
        CloseableHttpClient httpclient = HttpClients.createDefault();
        for (int i = 0; i < 1000; i++) {
            HttpGet httpGet = new HttpGet("http://query.customs.gov.cn/MNFTQ/Image.aspx?" + Math.random() * 1000);
            CloseableHttpResponse response1 = httpclient.execute(httpGet);
            // The response object holds the underlying HTTP connection. Always call
            // CloseableHttpResponse#close() from a finally clause, and fully consume
            // the entity so the connection can be safely reused.
            try {
                System.out.println(response1.getStatusLine());
                HttpEntity entity1 = response1.getEntity();
                InputStream input = entity1.getContent();
                File storeFile = new File("D:\\customs\\customs" + i + ".jpg");
                FileOutputStream output = new FileOutputStream(storeFile);
                IOUtils.copy(input, output);
                output.close();
                EntityUtils.consume(entity1);
            } finally {
                response1.close();
            }
        }
        */
    } catch (Exception e) {
        e.printStackTrace();
    }
}
From source file:com.act.biointerpretation.l2expansion.L2FilteringDriver.java
public static void main(String[] args) throws Exception {
    // Build command line parser.
    Options opts = new Options();
    for (Option.Builder b : OPTION_BUILDERS) {
        opts.addOption(b.build());
    }

    CommandLine cl = null;
    try {
        CommandLineParser parser = new DefaultParser();
        cl = parser.parse(opts, args);
    } catch (ParseException e) {
        LOGGER.error("Argument parsing failed: %s", e.getMessage());
        HELP_FORMATTER.printHelp(L2FilteringDriver.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }

    // Print help.
    if (cl.hasOption(OPTION_HELP)) {
        HELP_FORMATTER.printHelp(L2FilteringDriver.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        return;
    }

    checkFilterOptionIsValid(OPTION_CHEMICAL_FILTER, cl);
    checkFilterOptionIsValid(OPTION_REACTION_FILTER, cl);

    // Get corpus files.
    File corpusFile = new File(cl.getOptionValue(OPTION_INPUT_CORPUS));
    if (!corpusFile.exists()) {
        LOGGER.error("Input corpus file does not exist.");
        return;
    }

    File outputFile = new File(cl.getOptionValue(OPTION_OUTPUT_PATH));
    outputFile.createNewFile();
    if (outputFile.isDirectory()) {
        LOGGER.error("Output file is a directory.");
        System.exit(1);
    }

    LOGGER.info("Reading corpus from file.");
    L2PredictionCorpus predictionCorpus = L2PredictionCorpus.readPredictionsFromJsonFile(corpusFile);
    LOGGER.info("Read in corpus with %d predictions.", predictionCorpus.getCorpus().size());
    LOGGER.info("Corpus has %d distinct substrates.", predictionCorpus.getUniqueSubstrateInchis().size());

    if (cl.hasOption(OPTION_FILTER_SUBSTRATES)) {
        LOGGER.info("Filtering by substrates.");
        File substratesFile = new File(cl.getOptionValue(OPTION_FILTER_SUBSTRATES));
        L2InchiCorpus inchis = new L2InchiCorpus();
        inchis.loadCorpus(substratesFile);
        Set<String> inchiSet = new HashSet<String>();
        inchiSet.addAll(inchis.getInchiList());

        predictionCorpus = predictionCorpus
                .applyFilter(prediction -> inchiSet.containsAll(prediction.getSubstrateInchis()));
        predictionCorpus.writePredictionsToJsonFile(outputFile);
        LOGGER.info("Done writing filtered corpus to file.");
        return;
    }

    if (cl.hasOption(OPTION_SPLIT_BY_RO)) {
        LOGGER.info("Splitting corpus into distinct corpuses for each RO.");
        Map<String, L2PredictionCorpus> corpusMap = predictionCorpus
                .splitCorpus(prediction -> prediction.getProjectorName());

        for (Map.Entry<String, L2PredictionCorpus> entry : corpusMap.entrySet()) {
            String fileName = cl.getOptionValue(OPTION_OUTPUT_PATH) + "." + entry.getKey();
            File oneOutputFile = new File(fileName);
            entry.getValue().writePredictionsToJsonFile(oneOutputFile);
        }

        LOGGER.info("Done writing split corpuses to file.");
        return;
    }

    predictionCorpus = runDbLookups(cl, predictionCorpus, opts);

    LOGGER.info("Applying filters.");
    predictionCorpus = applyFilter(predictionCorpus, ALL_CHEMICALS_IN_DB, cl, OPTION_CHEMICAL_FILTER);
    predictionCorpus = applyFilter(predictionCorpus, REACTION_MATCHES_DB, cl, OPTION_REACTION_FILTER);
    LOGGER.info("Filtered corpus has %d predictions.", predictionCorpus.getCorpus().size());

    LOGGER.info("Printing final corpus.");
    predictionCorpus.writePredictionsToJsonFile(outputFile);

    LOGGER.info("L2FilteringDriver complete!");
}
From source file:com.ctriposs.rest4j.tools.data.FilterSchemaGenerator.java
public static void main(String[] args) {
    final CommandLineParser parser = new GnuParser();
    CommandLine cl = null;
    try {
        cl = parser.parse(_options, args);
    } catch (ParseException e) {
        _log.error("Invalid arguments: " + e.getMessage());
        reportInvalidArguments();
    }

    final String[] directoryArgs = cl.getArgs();
    if (directoryArgs.length != 2) {
        reportInvalidArguments();
    }

    final File sourceDirectory = new File(directoryArgs[0]);
    if (!sourceDirectory.exists()) {
        _log.error(sourceDirectory.getPath() + " does not exist");
        System.exit(1);
    }
    if (!sourceDirectory.isDirectory()) {
        _log.error(sourceDirectory.getPath() + " is not a directory");
        System.exit(1);
    }
    final URI sourceDirectoryURI = sourceDirectory.toURI();

    final File outputDirectory = new File(directoryArgs[1]);
    // Fixed from the original, which re-tested sourceDirectory here;
    // the output path is the one that must not name a non-directory.
    if (outputDirectory.exists() && !outputDirectory.isDirectory()) {
        _log.error(outputDirectory.getPath() + " is not a directory");
        System.exit(1);
    }

    final boolean isAvroMode = cl.hasOption('a');
    final String predicateExpression = cl.getOptionValue('e');
    final Predicate predicate = PredicateExpressionParser.parse(predicateExpression);

    final Collection<File> sourceFiles = FileUtil.listFiles(sourceDirectory, null);
    int exitCode = 0;
    for (File sourceFile : sourceFiles) {
        try {
            final ValidationOptions val = new ValidationOptions();
            val.setAvroUnionMode(isAvroMode);

            final SchemaParser schemaParser = new SchemaParser();
            schemaParser.setValidationOptions(val);
            schemaParser.parse(new FileInputStream(sourceFile));
            if (schemaParser.hasError()) {
                _log.error("Error parsing " + sourceFile.getPath() + ": "
                        + schemaParser.errorMessageBuilder().toString());
                exitCode = 1;
                continue;
            }

            final DataSchema originalSchema = schemaParser.topLevelDataSchemas().get(0);
            if (!(originalSchema instanceof NamedDataSchema)) {
                _log.error(sourceFile.getPath() + " does not contain a valid NamedDataSchema");
                exitCode = 1;
                continue;
            }

            final SchemaParser filterParser = new SchemaParser();
            filterParser.setValidationOptions(val);
            final NamedDataSchema filteredSchema = Filters.removeByPredicate((NamedDataSchema) originalSchema,
                    predicate, filterParser);
            if (filterParser.hasError()) {
                _log.error("Error applying predicate: " + filterParser.errorMessageBuilder().toString());
                exitCode = 1;
                continue;
            }

            final String relativePath = sourceDirectoryURI.relativize(sourceFile.toURI()).getPath();
            final String outputFilePath = outputDirectory.getPath() + File.separator + relativePath;
            final File outputFile = new File(outputFilePath);
            final File outputFileParent = outputFile.getParentFile();
            outputFileParent.mkdirs();
            if (!outputFileParent.exists()) {
                _log.error("Unable to write filtered schema to " + outputFileParent.getPath());
                exitCode = 1;
                continue;
            }

            FileOutputStream fout = new FileOutputStream(outputFile);
            fout.write(filteredSchema.toString().getBytes(RestConstants.DEFAULT_CHARSET));
            fout.close();
        } catch (IOException e) {
            _log.error(e.getMessage());
            exitCode = 1;
        }
    }

    System.exit(exitCode);
}
From source file:com.linkedin.restli.tools.data.FilterSchemaGenerator.java
public static void main(String[] args) {
    CommandLine cl = null;
    try {
        final CommandLineParser parser = new GnuParser();
        cl = parser.parse(_options, args);
    } catch (ParseException e) {
        _log.error("Invalid arguments: " + e.getMessage());
        reportInvalidArguments();
    }

    final String[] directoryArgs = cl.getArgs();
    if (directoryArgs.length != 2) {
        reportInvalidArguments();
    }

    final File sourceDirectory = new File(directoryArgs[0]);
    if (!sourceDirectory.exists()) {
        _log.error(sourceDirectory.getPath() + " does not exist");
        System.exit(1);
    }
    if (!sourceDirectory.isDirectory()) {
        _log.error(sourceDirectory.getPath() + " is not a directory");
        System.exit(1);
    }
    final URI sourceDirectoryURI = sourceDirectory.toURI();

    final File outputDirectory = new File(directoryArgs[1]);
    // Fixed from the original, which re-tested sourceDirectory here;
    // the output path is the one that must not name a non-directory.
    if (outputDirectory.exists() && !outputDirectory.isDirectory()) {
        _log.error(outputDirectory.getPath() + " is not a directory");
        System.exit(1);
    }

    final boolean isAvroMode = cl.hasOption('a');
    final String predicateExpression = cl.getOptionValue('e');
    final Predicate predicate = PredicateExpressionParser.parse(predicateExpression);

    final Collection<File> sourceFiles = FileUtil.listFiles(sourceDirectory, null);
    int exitCode = 0;
    for (File sourceFile : sourceFiles) {
        try {
            final ValidationOptions val = new ValidationOptions();
            val.setAvroUnionMode(isAvroMode);

            final SchemaParser schemaParser = new SchemaParser();
            schemaParser.setValidationOptions(val);
            schemaParser.parse(new FileInputStream(sourceFile));
            if (schemaParser.hasError()) {
                _log.error("Error parsing " + sourceFile.getPath() + ": " + schemaParser.errorMessageBuilder());
                exitCode = 1;
                continue;
            }

            final DataSchema originalSchema = schemaParser.topLevelDataSchemas().get(0);
            if (!(originalSchema instanceof NamedDataSchema)) {
                _log.error(sourceFile.getPath() + " does not contain a valid NamedDataSchema");
                exitCode = 1;
                continue;
            }

            final SchemaParser filterParser = new SchemaParser();
            filterParser.setValidationOptions(val);
            final NamedDataSchema filteredSchema = Filters.removeByPredicate((NamedDataSchema) originalSchema,
                    predicate, filterParser);
            if (filterParser.hasError()) {
                _log.error("Error applying predicate: " + filterParser.errorMessageBuilder());
                exitCode = 1;
                continue;
            }

            final String relativePath = sourceDirectoryURI.relativize(sourceFile.toURI()).getPath();
            final String outputFilePath = outputDirectory.getPath() + File.separator + relativePath;
            final File outputFile = new File(outputFilePath);
            final File outputFileParent = outputFile.getParentFile();
            outputFileParent.mkdirs();
            if (!outputFileParent.exists()) {
                _log.error("Unable to write filtered schema to " + outputFileParent.getPath());
                exitCode = 1;
                continue;
            }

            FileOutputStream fout = new FileOutputStream(outputFile);
            String schemaJson = SchemaToJsonEncoder.schemaToJson(filteredSchema, JsonBuilder.Pretty.INDENTED);
            fout.write(schemaJson.getBytes(RestConstants.DEFAULT_CHARSET));
            fout.close();
        } catch (IOException e) {
            _log.error(e.getMessage());
            exitCode = 1;
        }
    }

    System.exit(exitCode);
}
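Both FilterSchemaGenerator variants above validate the source and output directories the same way before doing any work. A minimal, self-contained sketch of that idiom; the class name and exception choices are illustrative, not from the original sources:

import java.io.File;

public class DirectoryValidation {
    // Rejects a missing or non-directory source; creates the output directory if absent.
    public static void validate(File sourceDirectory, File outputDirectory) {
        if (!sourceDirectory.exists() || !sourceDirectory.isDirectory()) {
            throw new IllegalArgumentException(sourceDirectory.getPath() + " is not an existing directory");
        }
        if (outputDirectory.exists() && !outputDirectory.isDirectory()) {
            throw new IllegalArgumentException(outputDirectory.getPath() + " exists but is not a directory");
        }
        if (!outputDirectory.exists() && !outputDirectory.mkdirs()) {
            throw new IllegalStateException("could not create " + outputDirectory.getPath());
        }
    }
}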
From source file:iac.cnr.it.TestSearcher.java
public static void main(String[] args) throws IOException, ParseException {
    /** Command line parser and options */
    CommandLineParser parser = new PosixParser();
    Options options = new Options();
    options.addOption(OPT_INDEX, true, "Index path");
    options.addOption(OPT_QUERY, true, "The query");

    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (org.apache.commons.cli.ParseException e) {
        logger.fatal("Error while parsing command line arguments");
        System.exit(1);
    }

    /** Check for mandatory options */
    if (!cmd.hasOption(OPT_INDEX) || !cmd.hasOption(OPT_QUERY)) {
        usage();
        System.exit(0);
    }

    /** Read options */
    File casePath = new File(cmd.getOptionValue(OPT_INDEX));
    String query = cmd.getOptionValue(OPT_QUERY);

    /** Check correctness of the path containing an ISODAC case */
    if (!casePath.exists() || !casePath.isDirectory()) {
        logger.fatal("The case directory \"" + casePath.getAbsolutePath() + "\" is not valid");
        System.exit(1);
    }

    /** Check existence of the info.dat file */
    File infoFile = new File(casePath, INFO_FILENAME);
    if (!infoFile.exists()) {
        logger.fatal("Can't find " + INFO_FILENAME + " within the case directory (" + casePath + ")");
        System.exit(1);
    }

    /** Load the mapping image_uuid --> image_filename */
    imagesMap = new HashMap<Integer, String>();
    BufferedReader reader = new BufferedReader(new FileReader(infoFile));
    while (reader.ready()) {
        String line = reader.readLine();
        logger.info("Read the line: " + line);

        String currentID = line.split("\t")[0];
        String currentImgFile = line.split("\t")[1];
        imagesMap.put(Integer.parseInt(currentID), currentImgFile);
        logger.info("ID: " + currentID + " - IMG: " + currentImgFile + " added to the map");
    }
    reader.close();

    /** Load all the directories containing an index */
    ArrayList<String> indexesDirs = new ArrayList<String>();
    for (File f : casePath.listFiles()) {
        logger.info("Analyzing: " + f);
        if (f.isDirectory())
            indexesDirs.add(f.getAbsolutePath());
    }
    logger.info(indexesDirs.size() + " directories found!");

    /** Set up the searcher */
    Searcher searcher = null;
    try {
        String[] array = indexesDirs.toArray(new String[indexesDirs.size()]);
        searcher = new Searcher(array);

        TopDocs results = searcher.search(query, Integer.MAX_VALUE);
        ScoreDoc[] hits = results.scoreDocs;
        int numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");

        for (int i = 0; i < numTotalHits; i++) {
            Document doc = searcher.doc(hits[i].doc);
            String path = doc.get(FIELD_PATH);
            String filename = doc.get(FIELD_FILENAME);
            String image_uuid = doc.get(FIELD_IMAGE_ID);
            if (path != null) {
                System.out.println((i + 1) + ". " + path + File.separator + filename + " - image_file: "
                        + imagesMap.get(Integer.parseInt(image_uuid)));
            } else {
                System.out.println((i + 1) + ". " + "No path for this document");
            }
        }
    } catch (Exception e) {
        System.err.println("An error occurred: " + e.getMessage());
        e.printStackTrace();
    } finally {
        if (searcher != null)
            searcher.close();
    }
}
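The loop above that collects the index subdirectories can also be written with a FileFilter; a brief sketch of the same isDirectory-based filtering (the class name is illustrative):

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class SubdirectoryLister {
    // Returns the absolute paths of the immediate subdirectories of base.
    public static List<String> subdirectories(File base) {
        List<String> dirs = new ArrayList<String>();
        // listFiles(FileFilter) applies File::isDirectory to each entry;
        // it returns null when base itself is not a directory.
        File[] children = base.listFiles(File::isDirectory);
        if (children != null) {
            for (File f : children) {
                dirs.add(f.getAbsolutePath());
            }
        }
        return dirs;
    }
}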
From source file:edu.msu.cme.rdp.classifier.train.validation.distance.TaxaSimilarityMain.java
/**
 * Calculates the average similarity (Sab score or pairwise alignment) between taxa at given ranks and
 * plots the box-and-whisker plot and accumulation curve.
 * The distances associated with a given rank are the distances between different child taxa; distances
 * within the same child taxon are not included.
 * For example, if a query and its closest match are from the same genus, the distance value is added to
 * that genus. If they are from different genera but the same family, the distance value is added to that
 * family, etc.
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException, OverlapCheckFailedException {
    String usage = "Usage: taxonfile trainset.fasta query.fasta outdir kmersize rankFile sab|pw \n"
            + "  This program calculates the average similarity (Sab score, or pairwise alignment) within taxa\n"
            + "  and plots the box-and-whisker plot and accumulation curve plot. \n"
            + "  rankFile: a file containing a list of ranks to be calculated and plotted. One rank per line, no particular order required. \n"
            + "  Note pw is extremely slow; recommended only for lower ranks such as species, genus and family. ";
    if (args.length != 7) {
        System.err.println(usage);
        System.exit(1);
    }

    List<String> ranks = readRanks(args[5]);
    File outdir = new File(args[3]);
    if (!outdir.isDirectory()) {
        System.err.println("outdir must be a directory");
        System.exit(1);
    }
    int kmer = Integer.parseInt(args[4]);
    GoodWordIterator.setWordSize(kmer);

    TaxaSimilarityMain theObj = new TaxaSimilarityMain(ranks);

    String plotTitle = new File(args[2]).getName();
    int index = plotTitle.indexOf(".");
    if (index != -1) {
        plotTitle = plotTitle.substring(0, index);
    }

    if (args[6].equalsIgnoreCase("sab")) {
        theObj.calSabSimilarity(args[0], args[1], args[2]);
    } else {
        theObj.calPairwiseSimilaritye(args[0], args[1], args[2]);
    }
    theObj.createPlot(plotTitle, outdir);
}
From source file:com.act.lcms.db.io.PrintConstructInfo.java
public static void main(String[] args) throws Exception {
    Options opts = new Options();
    for (Option.Builder b : OPTION_BUILDERS) {
        opts.addOption(b.build());
    }

    CommandLine cl = null;
    try {
        CommandLineParser parser = new DefaultParser();
        cl = parser.parse(opts, args);
    } catch (ParseException e) {
        System.err.format("Argument parsing failed: %s\n", e.getMessage());
        HELP_FORMATTER.printHelp(LoadPlateCompositionIntoDB.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }

    if (cl.hasOption("help")) {
        HELP_FORMATTER.printHelp(LoadPlateCompositionIntoDB.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        return;
    }

    File lcmsDir = new File(cl.getOptionValue(OPTION_DIRECTORY));
    if (!lcmsDir.isDirectory()) {
        System.err.format("File at %s is not a directory\n", lcmsDir.getAbsolutePath());
        HELP_FORMATTER.printHelp(LoadPlateCompositionIntoDB.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }

    try (DB db = DB.openDBFromCLI(cl)) {
        System.out.print("Loading/updating LCMS scan files into DB\n");
        ScanFile.insertOrUpdateScanFilesInDirectory(db, lcmsDir);

        String construct = cl.getOptionValue(OPTION_CONSTRUCT);
        List<LCMSWell> lcmsWells = LCMSWell.getInstance().getByConstructID(db, construct);
        Collections.sort(lcmsWells, new Comparator<LCMSWell>() {
            @Override
            public int compare(LCMSWell o1, LCMSWell o2) {
                return o1.getId().compareTo(o2.getId());
            }
        });

        Set<String> uniqueMSIDs = new HashSet<>();
        Map<Integer, Plate> platesById = new HashMap<>();

        System.out.format("\n\n-- Construct %s --\n\n", construct);

        List<ChemicalAssociatedWithPathway> pathwayChems = ChemicalAssociatedWithPathway.getInstance()
                .getChemicalsAssociatedWithPathwayByConstructId(db, construct);
        System.out.print("Chemicals associated with pathway:\n");
        System.out.format("  %-8s%-15s%-45s\n", "index", "kind", "chemical");
        for (ChemicalAssociatedWithPathway chem : pathwayChems) {
            System.out.format("  %-8d%-15s%-45s\n", chem.getIndex(), chem.getKind(), chem.getChemical());
        }

        System.out.print("\nLCMS wells:\n");
        System.out.format("  %-15s%-6s%-15s%-15s%-15s\n", "barcode", "well", "msid", "fed", "lcms_count");
        for (LCMSWell well : lcmsWells) {
            uniqueMSIDs.add(well.getMsid());

            Plate p = platesById.get(well.getPlateId());
            if (p == null) {
                // TODO: migrate Plate to be a subclass of BaseDBModel.
                p = Plate.getPlateById(db, well.getPlateId());
                platesById.put(p.getId(), p);
            }

            String chem = well.getChemical();
            List<ScanFile> scanFiles = ScanFile.getScanFileByPlateIDRowAndColumn(db, p.getId(),
                    well.getPlateRow(), well.getPlateColumn());

            System.out.format("  %-15s%-6s%-15s%-15s%-15d\n", p.getBarcode(), well.getCoordinatesString(),
                    well.getMsid(), chem == null || chem.isEmpty() ? "--" : chem, scanFiles.size());
            System.out.flush();
        }

        List<Integer> plateIds = Arrays.asList(platesById.keySet().toArray(new Integer[platesById.size()]));
        Collections.sort(plateIds);
        System.out.print("\nAppears in plates:\n");
        for (Integer id : plateIds) {
            Plate p = platesById.get(id);
            System.out.format("  %s: %s\n", p.getBarcode(), p.getName());
        }

        List<String> msids = Arrays.asList(uniqueMSIDs.toArray(new String[uniqueMSIDs.size()]));
        Collections.sort(msids);
        System.out.format("\nMSIDS: %s\n", StringUtils.join(msids, ", "));

        Set<String> availableNegativeControls = new HashSet<>();
        for (Map.Entry<Integer, Plate> entry : platesById.entrySet()) {
            List<LCMSWell> wells = LCMSWell.getInstance().getByPlateId(db, entry.getKey());
            for (LCMSWell well : wells) {
                if (!construct.equals(well.getComposition())) {
                    availableNegativeControls.add(well.getComposition());
                }
            }
        }

        // Print available standards for each step w/ plate barcodes and coordinates.
        System.out.format("\nAvailable Standards:\n");
        Map<Integer, Plate> plateCache = new HashMap<>();
        for (ChemicalAssociatedWithPathway chem : pathwayChems) {
            List<StandardWell> matchingWells = StandardWell.getInstance().getStandardWellsByChemical(db,
                    chem.getChemical());
            for (StandardWell well : matchingWells) {
                if (!plateCache.containsKey(well.getPlateId())) {
                    Plate p = Plate.getPlateById(db, well.getPlateId());
                    plateCache.put(p.getId(), p);
                }
            }

            Map<Integer, List<StandardWell>> standardWellsByPlateId = new HashMap<>();
            for (StandardWell well : matchingWells) {
                List<StandardWell> plateWells = standardWellsByPlateId.get(well.getPlateId());
                if (plateWells == null) {
                    plateWells = new ArrayList<>();
                    standardWellsByPlateId.put(well.getPlateId(), plateWells);
                }
                plateWells.add(well);
            }

            List<Pair<String, Integer>> plateBarcodes = new ArrayList<>(plateCache.size());
            for (Plate p : plateCache.values()) {
                if (p.getBarcode() == null) {
                    plateBarcodes.add(Pair.of("(no barcode)", p.getId()));
                } else {
                    plateBarcodes.add(Pair.of(p.getBarcode(), p.getId()));
                }
            }
            Collections.sort(plateBarcodes);

            System.out.format("  %s:\n", chem.getChemical());
            for (Pair<String, Integer> barcodePair : plateBarcodes) {
                // TODO: hoist this whole sorting/translation step into a utility class.
                List<StandardWell> wells = standardWellsByPlateId.get(barcodePair.getRight());
                if (wells == null) {
                    // Skip plates that don't apply to this chemical, which can happen because the plates are cached.
                    continue;
                }
                Collections.sort(wells, new Comparator<StandardWell>() {
                    @Override
                    public int compare(StandardWell o1, StandardWell o2) {
                        int c = o1.getPlateRow().compareTo(o2.getPlateRow());
                        if (c != 0)
                            return c;
                        return o1.getPlateColumn().compareTo(o2.getPlateColumn());
                    }
                });

                List<String> descriptions = new ArrayList<>(wells.size());
                for (StandardWell well : wells) {
                    descriptions.add(String.format("%s in %s%s", well.getCoordinatesString(), well.getMedia(),
                            well.getConcentration() == null ? ""
                                    : String.format(" c. %f", well.getConcentration())));
                }
                System.out.format("    %s: %s\n", barcodePair.getLeft(), StringUtils.join(descriptions, ", "));
            }
        }

        List<String> negativeControlStrains = Arrays
                .asList(availableNegativeControls.toArray(new String[availableNegativeControls.size()]));
        Collections.sort(negativeControlStrains);
        System.out.format("\nAvailable negative controls: %s\n", StringUtils.join(negativeControlStrains, ","));

        System.out.print("\n----------\n");
        System.out.print("\n\n");
    }
}
From source file:com.act.biointerpretation.l2expansion.L2ExpansionDriver.java
public static void main(String[] args) throws Exception {
    // Build command line parser.
    Options opts = new Options();
    for (Option.Builder b : OPTION_BUILDERS) {
        opts.addOption(b.build());
    }

    CommandLine cl = null;
    try {
        CommandLineParser parser = new DefaultParser();
        cl = parser.parse(opts, args);
    } catch (ParseException e) {
        LOGGER.error("Argument parsing failed: %s", e.getMessage());
        HELP_FORMATTER.printHelp(L2ExpansionDriver.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }

    // Print help.
    if (cl.hasOption(OPTION_HELP)) {
        HELP_FORMATTER.printHelp(L2ExpansionDriver.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        return;
    }

    // Get output files.
    String outputPath = cl.getOptionValue(OPTION_OUTPUT_PATH);
    File outputFile = new File(outputPath);
    if (outputFile.isDirectory() || outputFile.exists()) {
        LOGGER.error("Supplied output file is a directory or already exists.");
        System.exit(1);
    }
    outputFile.createNewFile();

    File inchiOutputFile = new File(outputPath + ".inchis");
    if (inchiOutputFile.isDirectory() || inchiOutputFile.exists()) {
        LOGGER.error("Supplied inchi output file is a directory or already exists.");
        System.exit(1);
    }
    inchiOutputFile.createNewFile();

    Optional<OutputStream> maybeProgressStream = Optional.empty();
    if (cl.hasOption(OPTION_PROGRESS_PATH)) {
        String progressPath = cl.getOptionValue(OPTION_PROGRESS_PATH);
        File progressFile = new File(progressPath);
        LOGGER.info("Writing incremental results to file at %s", progressFile.getAbsolutePath());
        if (progressFile.isDirectory() || progressFile.exists()) {
            LOGGER.error("Supplied progress file is a directory or already exists.");
            System.exit(1);
        }
        maybeProgressStream = Optional.of(new FileOutputStream(progressFile));
    }

    // Get metabolite list.
    L2InchiCorpus inchiCorpus = getInchiCorpus(cl, OPTION_METABOLITES);
    LOGGER.info("%d substrate inchis.", inchiCorpus.getInchiList().size());

    Integer maxMass = NO_MASS_THRESHOLD;
    if (cl.hasOption(OPTION_MASS_THRESHOLD)) {
        maxMass = Integer.parseInt(cl.getOptionValue(OPTION_MASS_THRESHOLD));
        LOGGER.info("Filtering out substrates with mass more than %d daltons.", maxMass);
    }
    inchiCorpus.filterByMass(maxMass);
    LOGGER.info("%d substrate inchis that are importable as molecules.", inchiCorpus.getInchiList().size());

    PredictionGenerator generator = new AllPredictionsGenerator(new ReactionProjector());

    L2Expander expander = buildExpander(cl, inchiCorpus, generator);
    L2PredictionCorpus predictionCorpus = expander.getPredictions(maybeProgressStream);
    LOGGER.info("Done with L2 expansion. Produced %d predictions.", predictionCorpus.getCorpus().size());

    LOGGER.info("Writing corpus to file.");
    predictionCorpus.writePredictionsToJsonFile(outputFile);
    L2InchiCorpus productInchis = new L2InchiCorpus(predictionCorpus.getUniqueProductInchis());
    productInchis.writeToFile(inchiOutputFile);

    LOGGER.info("L2ExpansionDriver complete!");
}
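The expansion driver uses isDirectory() together with exists() as a refuse-to-overwrite guard before creating each output file. A minimal sketch of that guard as a reusable helper; the helper name is hypothetical, not from the source above:

import java.io.File;
import java.io.IOException;

public class FreshOutputFile {
    // Creates a new, empty file at the given path, refusing to clobber
    // an existing file or to write over a directory.
    public static File create(String path) throws IOException {
        File f = new File(path);
        if (f.isDirectory() || f.exists()) {
            throw new IOException(path + " is a directory or already exists");
        }
        if (!f.createNewFile()) {
            throw new IOException("could not create " + path);
        }
        return f;
    }
}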
From source file:com.act.lcms.v2.fullindex.Searcher.java
public static void main(String args[]) throws Exception {
    CLIUtil cliUtil = new CLIUtil(Searcher.class, HELP_MESSAGE, OPTION_BUILDERS);
    CommandLine cl = cliUtil.parseCommandLine(args);

    File indexDir = new File(cl.getOptionValue(OPTION_INDEX_PATH));
    if (!indexDir.exists() || !indexDir.isDirectory()) {
        cliUtil.failWithMessage("Unable to read index directory at %s", indexDir.getAbsolutePath());
    }

    if (!cl.hasOption(OPTION_MZ_RANGE) && !cl.hasOption(OPTION_TIME_RANGE)) {
        cliUtil.failWithMessage(
                "Extracting all readings is not currently supported; specify an m/z or time range");
    }

    Pair<Double, Double> mzRange = extractRange(cl.getOptionValue(OPTION_MZ_RANGE));
    Pair<Double, Double> timeRange = extractRange(cl.getOptionValue(OPTION_TIME_RANGE));

    Searcher searcher = Factory.makeSearcher(indexDir);
    List<TMzI> results = searcher.searchIndexInRange(mzRange, timeRange);

    if (cl.hasOption(OPTION_OUTPUT_FILE)) {
        try (PrintWriter writer = new PrintWriter(new FileWriter(cl.getOptionValue(OPTION_OUTPUT_FILE)))) {
            Searcher.writeOutput(writer, results);
        }
    } else {
        // Don't close the print writer if we're writing to stdout.
        Searcher.writeOutput(new PrintWriter(new OutputStreamWriter(System.out)), results);
    }

    LOGGER.info("Done");
}
From source file:com.sangupta.keepwalking.MergeRepo.java
/**
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
    if (args.length != 3) {
        usage();
        return;
    }

    final String previousRepo = args[0];
    final String newerRepo = args[1];
    final String mergedRepo = args[2];

    final File previous = new File(previousRepo);
    final File newer = new File(newerRepo);
    final File merged = new File(mergedRepo);

    if (!(previous.exists() && previous.isDirectory())) {
        System.out.println("The previous version does not exist or is not a directory.");
        return;
    }

    if (!(newer.exists() && newer.isDirectory())) {
        System.out.println("The newer version does not exist or is not a directory.");
        return;
    }

    final IOFileFilter directoryFilter = FileFilterUtils.makeCVSAware(FileFilterUtils.makeSVNAware(null));
    final Collection<File> olderFiles = FileUtils.listFiles(previous, TrueFileFilter.TRUE, directoryFilter);
    final Collection<File> newerFiles = FileUtils.listFiles(newer, TrueFileFilter.TRUE, directoryFilter);

    // Build a list of unique relative paths.
    System.out.println("Reading files from older version...");
    List<String> olderPaths = new ArrayList<String>();
    for (File oldFile : olderFiles) {
        olderPaths.add(getRelativePath(oldFile, previous));
    }

    System.out.println("Reading files from newer version...");
    List<String> newerPaths = new ArrayList<String>();
    for (File newerFile : newerFiles) {
        newerPaths.add(getRelativePath(newerFile, newer));
    }

    // Find which files have been removed from the Perforce depot.
    List<String> filesRemoved = new ArrayList<String>(olderPaths);
    filesRemoved.removeAll(newerPaths);
    System.out.println("Files removed in newer version: " + filesRemoved.size());
    for (String removed : filesRemoved) {
        System.out.print(" ");
        System.out.println(removed);
    }

    // Find which files have been added in the Perforce depot.
    List<String> filesAdded = new ArrayList<String>(newerPaths);
    filesAdded.removeAll(olderPaths);
    System.out.println("Files added in newer version: " + filesAdded.size());
    for (String added : filesAdded) {
        System.out.print(" ");
        System.out.println(added);
    }

    // Find which files are common to both, then check whether they have been modified.
    newerPaths.retainAll(olderPaths);
    List<String> modified = checkModifiedFiles(newerPaths, previous, newer);
    System.out.println("Files modified in newer version: " + modified.size());
    for (String modify : modified) {
        System.out.print(" ");
        System.out.println(modify);
    }

    // Clean any previous existence of the merged repo.
    System.out.println("Cleaning any previous merged repositories...");
    if (merged.exists() && merged.isDirectory()) {
        FileUtils.deleteDirectory(merged);
    }

    System.out.println("Merging from newer to older repository...");

    // Copy the original SVN repo to merged.
    FileUtils.copyDirectory(previous, merged);

    // Remove all files that were deleted in the newer version.
    for (String removed : filesRemoved) {
        File toRemove = new File(merged, removed);
        toRemove.delete();
    }

    // Add all files that are new in Perforce.
    for (String added : filesAdded) {
        File toAdd = new File(newer, added);
        File destination = new File(merged, added);
        FileUtils.copyFile(toAdd, destination);
    }

    // Overwrite modified files.
    for (String changed : modified) {
        File change = new File(newer, changed);
        File destination = new File(merged, changed);
        destination.delete();
        FileUtils.copyFile(change, destination);
    }

    System.out.println("Done merging.");
}
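The removed/added/modified classification above is plain set arithmetic over relative paths; a small runnable sketch with placeholder file names:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class RepoDiffDemo {
    public static void main(String[] args) {
        // Placeholder relative paths standing in for the two directory listings.
        Set<String> older = new HashSet<>(Arrays.asList("a.txt", "b.txt", "c.txt"));
        Set<String> newer = new HashSet<>(Arrays.asList("b.txt", "c.txt", "d.txt"));

        Set<String> removed = new HashSet<>(older);
        removed.removeAll(newer);   // present before, gone now: [a.txt]

        Set<String> added = new HashSet<>(newer);
        added.removeAll(older);     // new in the newer tree: [d.txt]

        Set<String> common = new HashSet<>(newer);
        common.retainAll(older);    // candidates for a content comparison: [b.txt, c.txt]

        System.out.println("removed=" + removed + " added=" + added + " common=" + common);
    }
}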