List of usage examples for java.io.File.listFiles. The examples below exercise all three overloads:
public File[] listFiles()
public File[] listFiles(FileFilter filter)
public File[] listFiles(FilenameFilter filter)
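
Before the full examples, a minimal self-contained sketch of the typical call pattern (the directory path here is a hypothetical placeholder). Note that listFiles returns null, not an empty array, when the receiver is not a directory or an I/O error occurs, so the result should always be null-checked:

import java.io.File;
import java.io.FileFilter;

public class ListFilesSketch {
    public static void main(String[] args) {
        File dir = new File("/tmp"); // hypothetical directory; substitute your own path
        // Keep only regular files; subdirectories are rejected by the filter.
        File[] files = dir.listFiles(new FileFilter() {
            @Override
            public boolean accept(File pathname) {
                return pathname.isFile();
            }
        });
        if (files == null) { // not a directory, or an I/O error occurred
            System.err.println("Could not list " + dir.getAbsolutePath());
            return;
        }
        for (File f : files) {
            System.out.println(f.getName());
        }
    }
}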
From source file:imp.lstm.main.Driver.java
public static void main(String[] args) throws FileNotFoundException, IOException, ConfigurationException, InvalidParametersException {
    FileBasedConfigurationBuilder<PropertiesConfiguration> builder = new FileBasedConfigurationBuilder<>(
            PropertiesConfiguration.class).configure(
                    new Parameters().properties().setFileName(args[0]).setThrowExceptionOnMissing(true)
                            .setListDelimiterHandler(new DefaultListDelimiterHandler(';'))
                            .setIncludesAllowed(false));
    Configuration config = builder.getConfiguration();
    String inputSongPath = config.getString("input_song");
    String outputFolderPath = config.getString("output_folder");
    String autoEncoderParamsPath = config.getString("auto_encoder_params");
    String nameGeneratorParamsPath = config.getString("name_generator_params");
    String queueFolderPath = config.getString("queue_folder");
    String referenceQueuePath = config.getString("reference_queue", "nil");
    String inputCorpusFolder = config.getString("input_corpus_folder");
    boolean shouldWriteQueue = config.getBoolean("should_write_generated_queue");
    boolean frankensteinTest = config.getBoolean("queue_tests_frankenstein");
    boolean interpolateTest = config.getBoolean("queue_tests_interpolation");
    boolean iterateOverCorpus = config.getBoolean("iterate_over_corpus", false);
    boolean shouldGenerateSongTitle = config.getBoolean("generate_song_title");
    boolean shouldGenerateSong = config.getBoolean("generate_leadsheet");
    // NOTE: advanceDecoding is used below but was never declared in the original snippet;
    // it is assumed here to be another config-driven flag.
    boolean advanceDecoding = config.getBoolean("advance_decoding", false);

    LogTimer.initStartTime(); // start our logging timer to keep track of our execution time

    LogTimer.log("Creating name generator...");
    // here is just silly code for generating a name based on an LSTM
    LSTM lstm = new LSTM();
    FullyConnectedLayer fullLayer = new FullyConnectedLayer(Operations.None);
    Loadable titleNetLoader = new Loadable() {
        @Override
        public boolean load(INDArray array, String path) {
            String car = pathCar(path);
            String cdr = pathCdr(path);
            switch (car) {
            case "full":
                return fullLayer.load(array, cdr);
            case "lstm":
                return lstm.load(array, cdr);
            default:
                return false;
            }
        }
    };
    LogTimer.log("Packing name generator from files...");
    (new NetworkConnectomeLoader()).load(nameGeneratorParamsPath, titleNetLoader);
    String characterString = " !\"'[],-.01245679:?ABCDEFGHIJKLMNOPQRSTUVWYZabcdefghijklmnopqrstuvwxyz";

    // Initialization
    LogTimer.log("Creating autoencoder...");
    int inputSize = 34;
    int outputSize = EncodingParameters.noteEncoder.getNoteLength();
    int featureVectorSize = 100;
    ProductCompressingAutoencoder autoencoder = new ProductCompressingAutoencoder(24, 48, 84 + 1, false); // create our network
    int numInterpolationDivisions = 5;

    // "pack" the network from the weights and biases file directory
    LogTimer.log("Packing autoencoder from files");
    (new NetworkConnectomeLoader()).load(autoEncoderParamsPath, autoencoder);

    File[] songFiles;
    if (iterateOverCorpus) {
        songFiles = new File(inputCorpusFolder).listFiles();
    } else {
        songFiles = new File[] { new File(inputSongPath) };
    }
    for (File inputFile : songFiles) {
        (new NetworkConnectomeLoader()).refresh(autoEncoderParamsPath, autoencoder, "initialstate");
        String songTitle;
        if (shouldGenerateSong) {
            Random rand = new Random();
            AVector charOut = Vector.createLength(characterString.length());
            GroupedSoftMaxSampler sampler = new GroupedSoftMaxSampler(
                    new Group[] { new Group(0, characterString.length(), true) });
            songTitle = "";
            for (int i = 0; i < 50; i++) {
                charOut = fullLayer.forward(lstm.step(charOut));
                charOut = sampler.filter(charOut);
                int charIndex = 0;
                for (; charIndex < charOut.length(); charIndex++) {
                    if (charOut.get(charIndex) == 1.0) {
                        break;
                    }
                }
                songTitle += characterString.substring(charIndex, charIndex + 1);
            }
            songTitle = songTitle.trim();
            LogTimer.log("Generated song name: " + songTitle);
        } else {
            songTitle = "The Song We Never Name";
        }
        LogTimer.log("Reading file...");
        // read our leadsheet to get a data vessel as retrieved in rbm-provisor
        LeadSheetDataSequence inputSequence = LeadSheetIO.readLeadSheet(inputFile);
        LeadSheetDataSequence outputSequence = inputSequence.copy();
        outputSequence.clearMelody();
        if (interpolateTest) {
            LeadSheetDataSequence additionalOutput = outputSequence.copy();
            for (int i = 0; i < numInterpolationDivisions; i++) {
                outputSequence.concat(additionalOutput.copy());
            }
        }
        LeadSheetDataSequence decoderInputSequence = outputSequence.copy();
        LogTimer.startLog("Encoding data...");
        //TradingTimer.initStart(); // start our trading timer to keep track of our generation versus realtime play
        while (inputSequence.hasNext()) { // iterate through time steps in input data
            //TradingTimer.waitForNextTimedInput();
            autoencoder.encodeStep(inputSequence.retrieve()); // feed the resultant input vector into the network
            if (advanceDecoding) { // if we are using advance decoding (we start decoding as soon as we can)
                if (autoencoder.canDecode()) { // if queue has enough data to decode from
                    // take sampled data for a timestep from the autoencoder
                    outputSequence.pushStep(null, null, autoencoder.decodeStep(decoderInputSequence.retrieve()));
                    //TradingTimer.logTimestep(); // log our time so we can know how far ahead of realtime we are
                }
            }
        }
        LogTimer.endLog();
        if (shouldWriteQueue) {
            String queueFilePath = queueFolderPath + java.io.File.separator
                    + inputFile.getName().replace(".ls", ".q");
            FragmentedNeuralQueue currQueue = autoencoder.getQueue();
            currQueue.writeToFile(queueFilePath);
            LogTimer.log("Wrote queue " + inputFile.getName().replace(".ls", ".q") + " to file...");
        }
        if (shouldGenerateSong) {
            if (interpolateTest) {
                FragmentedNeuralQueue refQueue = new FragmentedNeuralQueue();
                refQueue.initFromFile(referenceQueuePath);
                FragmentedNeuralQueue currQueue = autoencoder.getQueue();
                //currQueue.writeToFile(queueFilePath);
                autoencoder.setQueue(currQueue.copy());
                while (autoencoder.hasDataStepsLeft()) { // done encoding all time steps, so just finish decoding
                    outputSequence.pushStep(null, null, autoencoder.decodeStep(decoderInputSequence.retrieve()));
                }
                for (int i = 1; i <= numInterpolationDivisions; i++) {
                    System.out.println("Starting interpolation " + ((1.0 / numInterpolationDivisions) * (i)));
                    (new NetworkConnectomeLoader()).refresh(autoEncoderParamsPath, autoencoder, "initialstate");
                    FragmentedNeuralQueue currCopy = currQueue.copy();
                    currCopy.basicInterpolate(refQueue, (1.0 / numInterpolationDivisions) * (i));
                    autoencoder.setQueue(currCopy);
                    int timeStep = 0;
                    while (autoencoder.hasDataStepsLeft()) {
                        System.out.println("interpolation " + i + " step " + ++timeStep);
                        outputSequence.pushStep(null, null,
                                autoencoder.decodeStep(decoderInputSequence.retrieve()));
                    }
                }
            }
            if (frankensteinTest) {
                LogTimer.startLog("Loading queues");
                File queueFolder = new File(queueFolderPath);
                int numComponents = config.getInt("frankenstein_num_components", 5);
                int numCombinations = config.getInt("frankenstein_num_combinations", 6);
                double interpolationMagnitude = config.getDouble("frankenstein_magnitude", 2.0);
                if (queueFolder.isDirectory()) {
                    File[] queueFiles = queueFolder.listFiles(new FilenameFilter() {
                        @Override
                        public boolean accept(File dir, String name) {
                            return name.contains(".q");
                        }
                    });
                    List<File> fileList = new ArrayList<>();
                    for (File file : queueFiles) {
                        fileList.add(file);
                    }
                    Collections.shuffle(fileList);
                    int numSelectedFiles = (numComponents > queueFiles.length) ? queueFiles.length
                            : numComponents;
                    for (int i = 0; i < queueFiles.length - numSelectedFiles; i++) {
                        fileList.remove(fileList.size() - 1);
                    }
                    List<FragmentedNeuralQueue> queuePopulation = new ArrayList<>(fileList.size());
                    songTitle += " - a mix of ";
                    for (File file : fileList) {
                        FragmentedNeuralQueue newQueue = new FragmentedNeuralQueue();
                        newQueue.initFromFile(file.getPath());
                        queuePopulation.add(newQueue);
                        songTitle += file.getName().replaceAll(".ls", "") + ", ";
                    }
                    LogTimer.endLog();
                    LeadSheetDataSequence additionalOutput = outputSequence.copy();
                    for (int i = 1; i < numCombinations; i++) {
                        outputSequence.concat(additionalOutput.copy());
                    }
                    decoderInputSequence = outputSequence.copy();
                    FragmentedNeuralQueue origQueue = autoencoder.getQueue();
                    for (int i = 0; i < numCombinations; i++) {
                        LogTimer.startLog("Performing queue interpolation...");
                        AVector combinationStrengths = Vector.createLength(queuePopulation.size());
                        Random vectorRand = new Random(i);
                        for (int j = 0; j < combinationStrengths.length(); j++) {
                            combinationStrengths.set(j, vectorRand.nextDouble());
                        }
                        combinationStrengths.divide(combinationStrengths.elementSum());
                        FragmentedNeuralQueue currQueue = origQueue.copy();
                        for (int k = 0; k < combinationStrengths.length(); k++) {
                            currQueue.basicInterpolate(queuePopulation.get(k),
                                    combinationStrengths.get(k) * interpolationMagnitude);
                        }
                        LogTimer.endLog();
                        autoencoder.setQueue(currQueue);
                        LogTimer.startLog("Refreshing autoencoder state...");
                        (new NetworkConnectomeLoader()).refresh(autoEncoderParamsPath, autoencoder, "initialstate");
                        LogTimer.endLog();
                        LogTimer.startLog("Decoding segment...");
                        while (autoencoder.hasDataStepsLeft()) {
                            outputSequence.pushStep(null, null,
                                    autoencoder.decodeStep(decoderInputSequence.retrieve()));
                        }
                        LogTimer.endLog();
                    }
                }
            }
            while (autoencoder.hasDataStepsLeft()) { // finish decoding whatever remains
                outputSequence.pushStep(null, null, autoencoder.decodeStep(decoderInputSequence.retrieve()));
            }
            LogTimer.log("Writing file...");
            // we'll write our generated file with the same name plus "_Output"
            String outputFilename = outputFolderPath + java.io.File.separator
                    + inputFile.getName().replace(".ls", "_Output");
            LeadSheetIO.writeLeadSheet(outputSequence, outputFilename, songTitle);
            System.out.println(outputFilename);
        } else {
            autoencoder.setQueue(new FragmentedNeuralQueue());
        }
    }
    LogTimer.log("Process finished"); // Done!
}
From source file:act.installer.pubchem.PubchemTTLMerger.java
public static void main(String[] args) throws Exception {
    org.apache.commons.cli.Options opts = new org.apache.commons.cli.Options();
    for (Option.Builder b : OPTION_BUILDERS) {
        opts.addOption(b.build());
    }
    CommandLine cl = null;
    try {
        CommandLineParser parser = new DefaultParser();
        cl = parser.parse(opts, args);
    } catch (ParseException e) {
        System.err.format("Argument parsing failed: %s\n", e.getMessage());
        HELP_FORMATTER.printHelp(PubchemTTLMerger.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }
    if (cl.hasOption("help")) {
        HELP_FORMATTER.printHelp(PubchemTTLMerger.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        return;
    }
    PubchemTTLMerger merger = new PubchemTTLMerger();
    File rocksDBFile = new File(cl.getOptionValue(OPTION_INDEX_PATH));
    if (cl.hasOption(OPTION_ONLY_MERGE)) {
        if (!(rocksDBFile.exists() && rocksDBFile.isDirectory())) {
            System.err.format("Must specify an existing RocksDB index when using '%s'.\n", OPTION_ONLY_MERGE);
            HELP_FORMATTER.printHelp(PubchemTTLMerger.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
            System.exit(1);
        }
        merger.finish(merger.merge(rocksDBFile));
        return;
    }
    File rdfDir = new File(cl.getOptionValue(OPTION_RDF_DIRECTORY));
    if (!rdfDir.isDirectory()) {
        System.err.format("Must specify a directory of RDF files to be parsed.\n");
        HELP_FORMATTER.printHelp(PubchemTTLMerger.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }
    File[] filesInDirectoryArray = rdfDir.listFiles(new FilenameFilter() {
        private static final String TTL_GZ_SUFFIX = ".ttl.gz";

        @Override
        public boolean accept(File dir, String name) {
            return name.endsWith(TTL_GZ_SUFFIX);
        }
    });
    if (filesInDirectoryArray == null || filesInDirectoryArray.length == 0) {
        System.err.format("Found zero compressed TTL files in directory at '%s'.\n", rdfDir.getAbsolutePath());
        HELP_FORMATTER.printHelp(PubchemTTLMerger.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }
    // Sort files for stability/sanity.
    List<File> filesInDirectory = Arrays.asList(filesInDirectoryArray);
    Collections.sort(filesInDirectory);
    if (cl.hasOption(OPTION_ONLY_SYNONYMS)) {
        filesInDirectory = filterByFileContents(filesInDirectory, PC_RDF_DATA_FILE_CONFIG.HASH_TO_SYNONYM);
    }
    if (cl.hasOption(OPTION_ONLY_MESH)) {
        filesInDirectory = filterByFileContents(filesInDirectory, PC_RDF_DATA_FILE_CONFIG.HASH_TO_MESH);
    }
    if (cl.hasOption(OPTION_ONLY_PUBCHEM_IDS)) {
        filesInDirectory = filterByFileContents(filesInDirectory, PC_RDF_DATA_FILE_CONFIG.HASH_TO_CID);
    }
    if (filesInDirectory.size() == 0) {
        System.err.format("Arrived at index initialization with no files to process. "
                + "Maybe too many filters were specified? synonyms: %s, MeSH: %s, Pubchem ids: %s\n",
                cl.hasOption(OPTION_ONLY_SYNONYMS), cl.hasOption(OPTION_ONLY_MESH),
                cl.hasOption(OPTION_ONLY_PUBCHEM_IDS));
        HELP_FORMATTER.printHelp(PubchemTTLMerger.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }
    RocksDB.loadLibrary();
    Pair<RocksDB, Map<COLUMN_FAMILIES, ColumnFamilyHandle>> dbAndHandles = null;
    try {
        if (rocksDBFile.exists()) {
            if (!cl.hasOption(OPTION_OPEN_EXISTING_OKAY)) {
                System.err.format(
                        "Index directory at '%s' already exists, delete before retrying or add '%s' option to reuse.\n",
                        rocksDBFile.getAbsolutePath(), OPTION_OPEN_EXISTING_OKAY);
                HELP_FORMATTER.printHelp(PubchemTTLMerger.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
                System.exit(1);
            } else {
                LOGGER.info("Reusing existing index at %s", rocksDBFile.getAbsolutePath());
                dbAndHandles = openExistingRocksDB(rocksDBFile);
            }
        } else {
            LOGGER.info("Creating new index at %s", rocksDBFile.getAbsolutePath());
            dbAndHandles = createNewRocksDB(rocksDBFile);
        }
        merger.buildIndex(dbAndHandles, filesInDirectory);
        merger.merge(dbAndHandles);
    } finally {
        if (dbAndHandles != null) {
            merger.finish(dbAndHandles);
        }
    }
}
From source file:data_gen.Data_gen.java
public static void main(String[] args) throws FileNotFoundException, IOException {
    long startTime = System.nanoTime();
    if (args.length < 2) {
        System.out.println("Usage:");
        System.out.println(
                "java -jar \"jarfile\" [Directory of text source folder] [Directory of configuration file]" + "\n");
        System.exit(0);
    }
    String Dir = args[0]; // get text source dir from user
    String config_dir = args[1];
    File folder = new File(Dir);
    if (folder.isDirectory() == false) {
        System.out.println("Text source folder is not a directory." + "\n");
        System.exit(0);
    }
    if (!config_dir.endsWith(".properties") && !config_dir.endsWith(".PROPERTIES")) {
        System.out.println("\n"
                + "There was an error parsing dataset parameters from the configuration file; make sure you have the 4 parameters specified and the right type of file"
                + "\n");
        System.exit(0);
    }
    listOfFiles = folder.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.toLowerCase().endsWith(".txt");
        }
    });
    if (listOfFiles.length == 0) {
        System.out.println("Text source folder is empty! Have at least one .txt file there" + "\n");
        System.exit(0);
    }
    System.out.println("\n");
    Parse_Document_values(config_dir); // parse config file to get class attribute values
    document_size = Docments_Total_size / documents_count; // to get each document size
    max = (long) ((double) document_size * 1.8);
    min = (long) ((double) document_size * 0.2);
    schema_fields = Parse_Document_fields(config_dir);
    try {
        LineIterator it = FileUtils.lineIterator(listOfFiles[0]);
        while (it.hasNext()) {
            tx.add(it.nextLine());
        }
    } catch (NullPointerException | FileNotFoundException e) {
        System.out.println("The text source file could not be found." + "\n");
        System.exit(0);
    }
    new File(output_dir).mkdir();
    // Build the dataset as .json or .dat, depending on the configured file name.
    if (Default_DataSet_name.endsWith(".json")) {
        Build_json_file(config_dir, startTime);
    }
    if (Default_DataSet_name.endsWith(".dat")) {
        Build_dat_file(config_dir, startTime);
    }
    generate_xml();
    generate_field_map();
}
From source file:com.twentyn.patentScorer.ScoreMerger.java
public static void main(String[] args) throws Exception {
    System.out.println("Starting up...");
    System.out.flush();
    Options opts = new Options();
    opts.addOption(Option.builder("h").longOpt("help").desc("Print this help message and exit").build());
    opts.addOption(Option.builder("r").longOpt("results").required().hasArg()
            .desc("A directory of search results to read").build());
    opts.addOption(Option.builder("s").longOpt("scores").required().hasArg()
            .desc("A directory of patent classification scores to read").build());
    opts.addOption(Option.builder("o").longOpt("output").required().hasArg()
            .desc("The output file where results will be written.").build());
    HelpFormatter helpFormatter = new HelpFormatter();
    CommandLineParser cmdLineParser = new DefaultParser();
    CommandLine cmdLine = null;
    try {
        cmdLine = cmdLineParser.parse(opts, args);
    } catch (ParseException e) {
        System.out.println("Caught exception when parsing command line: " + e.getMessage());
        helpFormatter.printHelp("DocumentIndexer", opts);
        System.exit(1);
    }
    if (cmdLine.hasOption("help")) {
        helpFormatter.printHelp("DocumentIndexer", opts);
        System.exit(0);
    }
    File scoresDirectory = new File(cmdLine.getOptionValue("scores"));
    if (cmdLine.getOptionValue("scores") == null || !scoresDirectory.isDirectory()) {
        LOGGER.error("Not a directory of score files: " + cmdLine.getOptionValue("scores"));
    }
    File resultsDirectory = new File(cmdLine.getOptionValue("results"));
    if (cmdLine.getOptionValue("results") == null || !resultsDirectory.isDirectory()) {
        LOGGER.error("Not a directory of results files: " + cmdLine.getOptionValue("results"));
    }
    FileWriter outputWriter = new FileWriter(cmdLine.getOptionValue("output"));
    ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.enable(SerializationFeature.INDENT_OUTPUT);
    objectMapper.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
    FilenameFilter jsonFilter = new FilenameFilter() {
        public final Pattern JSON_PATTERN = Pattern.compile("\\.json$");

        public boolean accept(File dir, String name) {
            return JSON_PATTERN.matcher(name).find();
        }
    };
    Map<String, PatentScorer.ClassificationResult> scores = new HashMap<>();
    LOGGER.info("Reading scores from directory at " + scoresDirectory.getAbsolutePath());
    for (File scoreFile : scoresDirectory.listFiles(jsonFilter)) {
        BufferedReader reader = new BufferedReader(new FileReader(scoreFile));
        int count = 0;
        String line;
        while ((line = reader.readLine()) != null) {
            PatentScorer.ClassificationResult res = objectMapper.readValue(line,
                    PatentScorer.ClassificationResult.class);
            scores.put(res.docId, res);
            count++;
        }
        LOGGER.info("Read " + count + " scores from " + scoreFile.getAbsolutePath());
    }
    Map<String, List<DocumentSearch.SearchResult>> synonymsToResults = new HashMap<>();
    Map<String, List<DocumentSearch.SearchResult>> inchisToResults = new HashMap<>();
    LOGGER.info("Reading results from directory at " + resultsDirectory);
    // With help from http://stackoverflow.com/questions/6846244/jackson-and-generic-type-reference.
    JavaType resultsType = objectMapper.getTypeFactory().constructCollectionType(List.class,
            DocumentSearch.SearchResult.class);
    List<File> resultsFiles = Arrays.asList(resultsDirectory.listFiles(jsonFilter));
    Collections.sort(resultsFiles, new Comparator<File>() {
        @Override
        public int compare(File o1, File o2) {
            return o1.getName().compareTo(o2.getName());
        }
    });
    for (File resultsFile : resultsFiles) {
        BufferedReader reader = new BufferedReader(new FileReader(resultsFile));
        CharBuffer buffer = CharBuffer.allocate(Long.valueOf(resultsFile.length()).intValue());
        int bytesRead = reader.read(buffer);
        LOGGER.info("Read " + bytesRead + " bytes from " + resultsFile.getName() + " (length is "
                + resultsFile.length() + ")");
        List<DocumentSearch.SearchResult> results = objectMapper.readValue(new CharArrayReader(buffer.array()),
                resultsType);
        LOGGER.info("Read " + results.size() + " results from " + resultsFile.getAbsolutePath());
        int count = 0;
        for (DocumentSearch.SearchResult sres : results) {
            for (DocumentSearch.ResultDocument resDoc : sres.getResults()) {
                String docId = resDoc.getDocId();
                PatentScorer.ClassificationResult classificationResult = scores.get(docId);
                if (classificationResult == null) {
                    LOGGER.warn("No classification result found for " + docId);
                } else {
                    resDoc.setClassifierScore(classificationResult.getScore());
                }
            }
            if (!synonymsToResults.containsKey(sres.getSynonym())) {
                synonymsToResults.put(sres.getSynonym(), new ArrayList<DocumentSearch.SearchResult>());
            }
            synonymsToResults.get(sres.getSynonym()).add(sres);
            count++;
            if (count % 1000 == 0) {
                LOGGER.info("Processed " + count + " search result documents");
            }
        }
    }
    Comparator<DocumentSearch.ResultDocument> resultDocumentComparator = new Comparator<DocumentSearch.ResultDocument>() {
        @Override
        public int compare(DocumentSearch.ResultDocument o1, DocumentSearch.ResultDocument o2) {
            int cmp = o2.getClassifierScore().compareTo(o1.getClassifierScore());
            if (cmp != 0) {
                return cmp;
            }
            cmp = o2.getScore().compareTo(o1.getScore());
            return cmp;
        }
    };
    for (Map.Entry<String, List<DocumentSearch.SearchResult>> entry : synonymsToResults.entrySet()) {
        DocumentSearch.SearchResult newSearchRes = null;
        // Merge all result documents into a single search result.
        for (DocumentSearch.SearchResult sr : entry.getValue()) {
            if (newSearchRes == null) {
                newSearchRes = sr;
            } else {
                newSearchRes.getResults().addAll(sr.getResults());
            }
        }
        if (newSearchRes == null || newSearchRes.getResults() == null) {
            LOGGER.error("Search results for " + entry.getKey() + " are null.");
            continue;
        }
        Collections.sort(newSearchRes.getResults(), resultDocumentComparator);
        if (!inchisToResults.containsKey(newSearchRes.getInchi())) {
            inchisToResults.put(newSearchRes.getInchi(), new ArrayList<DocumentSearch.SearchResult>());
        }
        inchisToResults.get(newSearchRes.getInchi()).add(newSearchRes);
    }
    List<String> sortedKeys = new ArrayList<String>(inchisToResults.keySet());
    Collections.sort(sortedKeys);
    List<GroupedInchiResults> orderedResults = new ArrayList<>(sortedKeys.size());
    Comparator<DocumentSearch.SearchResult> synonymSorter = new Comparator<DocumentSearch.SearchResult>() {
        @Override
        public int compare(DocumentSearch.SearchResult o1, DocumentSearch.SearchResult o2) {
            return o1.getSynonym().compareTo(o2.getSynonym());
        }
    };
    for (String inchi : sortedKeys) {
        List<DocumentSearch.SearchResult> res = inchisToResults.get(inchi);
        Collections.sort(res, synonymSorter);
        orderedResults.add(new GroupedInchiResults(inchi, res));
    }
    objectMapper.writerWithView(Object.class).writeValue(outputWriter, orderedResults);
    outputWriter.close();
}
From source file:ValidateLicenseHeaders.java
/**
 * ValidateLicenseHeaders jboss-src-root
 *
 * @param args
 */
public static void main(String[] args) throws Exception {
    if (args.length == 0 || args[0].startsWith("-h")) {
        log.info("Usage: ValidateLicenseHeaders [-addheader] jboss-src-root");
        System.exit(1);
    }
    int rootArg = 0;
    if (args.length == 2) {
        if (args[0].startsWith("-add"))
            addDefaultHeader = true;
        else {
            log.severe("Unknown argument: " + args[0]);
            log.info("Usage: ValidateLicenseHeaders [-addheader] jboss-src-root");
            System.exit(1);
        }
        rootArg = 1;
    }
    File jbossSrcRoot = new File(args[rootArg]);
    if (jbossSrcRoot.exists() == false) {
        log.info("Src root does not exist, check " + jbossSrcRoot.getAbsolutePath());
        System.exit(1);
    }
    URL u = Thread.currentThread().getContextClassLoader()
            .getResource("META-INF/services/javax.xml.parsers.DocumentBuilderFactory");
    System.err.println(u);
    // Load the valid copyright statements for the licenses
    File licenseInfo = new File(jbossSrcRoot, "varia/src/etc/license-info.xml");
    if (licenseInfo.exists() == false) {
        log.severe("Failed to find the varia/src/etc/license-info.xml under the src root");
        System.exit(1);
    }
    DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = factory.newDocumentBuilder();
    Document doc = db.parse(licenseInfo);
    NodeList licenses = doc.getElementsByTagName("license");
    for (int i = 0; i < licenses.getLength(); i++) {
        Element license = (Element) licenses.item(i);
        String key = license.getAttribute("id");
        ArrayList headers = new ArrayList();
        licenseHeaders.put(key, headers);
        NodeList copyrights = license.getElementsByTagName("terms-header");
        for (int j = 0; j < copyrights.getLength(); j++) {
            Element copyright = (Element) copyrights.item(j);
            copyright.normalize();
            String id = copyright.getAttribute("id");
            // The id will be blank if there is no id attribute
            if (id.length() == 0)
                continue;
            String text = getElementContent(copyright);
            if (text == null)
                continue;
            // Replace all duplicate whitespace and '*' with a single space
            text = text.replaceAll("[\\s*]+", " ");
            if (text.length() == 1)
                continue;
            text = text.toLowerCase().trim();
            // Replace any copyright date0-date1,date2 with copyright ...
            text = text.replaceAll(COPYRIGHT_REGEX, "...");
            LicenseHeader lh = new LicenseHeader(id, text);
            headers.add(lh);
        }
    }
    log.fine(licenseHeaders.toString());
    File[] files = jbossSrcRoot.listFiles(dotJavaFilter);
    log.info("Root files count: " + files.length);
    processSourceFiles(files, 0);
    log.info("Processed " + totalCount);
    log.info("Updated jboss headers: " + jbossCount);
    // Files with no headers details
    log.info("Files with no headers: " + noheaders.size());
    FileWriter fw = new FileWriter("NoHeaders.txt");
    for (Iterator iter = noheaders.iterator(); iter.hasNext();) {
        File f = (File) iter.next();
        fw.write(f.getAbsolutePath());
        fw.write('\n');
    }
    fw.close();
    // Files with unknown headers details
    log.info("Files with invalid headers: " + invalidheaders.size());
    fw = new FileWriter("InvalidHeaders.txt");
    for (Iterator iter = invalidheaders.iterator(); iter.hasNext();) {
        File f = (File) iter.next();
        fw.write(f.getAbsolutePath());
        fw.write('\n');
    }
    fw.close();
    // License usage summary
    log.info("Creating HeadersSummary.txt");
    fw = new FileWriter("HeadersSummary.txt");
    for (Iterator iter = licenseHeaders.entrySet().iterator(); iter.hasNext();) {
        Map.Entry entry = (Map.Entry) iter.next();
        String key = (String) entry.getKey();
        fw.write("+++ License type=" + key);
        fw.write('\n');
        List list = (List) entry.getValue();
        Iterator jiter = list.iterator();
        while (jiter.hasNext()) {
            LicenseHeader lh = (LicenseHeader) jiter.next();
            fw.write('\t');
            fw.write(lh.id);
            fw.write(", count=");
            fw.write("" + lh.count);
            fw.write('\n');
        }
    }
    fw.close();
}
From source file:Main.java
static public File[] getDirectories(File file) {
    return file.listFiles(new FileFilter() {
        @Override
        public boolean accept(File pathname) {
            return pathname.isDirectory();
        }
    });
}
From source file:Main.java
private static File[] listFiles(File dir) {
    File[] fileList = dir.listFiles(new FileFilter() {
        public boolean accept(File file) {
            return file.isFile();
        }
    });
    return fileList;
}
From source file:Main.java
private static File getCachImage(final String id, File cachDir) {
    File[] list = cachDir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String filename) {
            return filename.contains(id);
        }
    });
    if (list != null && list.length != 0) {
        return list[0];
    } else {
        return null;
    }
}
From source file:Main.java
public static boolean deleteFiles(final File directory, final String prefix) {
    for (File child : directory.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.startsWith(prefix);
        }
    })) {
        deleteRecursive(child);
    }
    return true;
}
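
Note that the example above throws a NullPointerException if directory.listFiles(...) returns null (non-directory argument or I/O error). A defensive variant, shown as a sketch that assumes the same deleteRecursive helper from the original class, might look like:

public static boolean deleteFilesSafely(final File directory, final String prefix) {
    File[] matches = directory.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.startsWith(prefix);
        }
    });
    if (matches == null) { // not a directory, or an I/O error occurred
        return false;
    }
    for (File child : matches) {
        deleteRecursive(child); // assumed helper from the original class
    }
    return true;
}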
From source file:Main.java
private static void innerListFiles(Collection<File> files, File directory, FileFilter filter) {
    File[] found = directory.listFiles(filter);
    if (found != null) {
        for (File file : found) {
            if (file.isDirectory()) {
                innerListFiles(files, file, filter);
            } else {
                files.add(file);
            }
        }
    }
}
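
A hypothetical caller for the recursive helper above, assuming it is invoked from the same class. Note that the filter must accept directories as well as files: accepted directories are recursed into, while accepted regular files are collected.

// Collect every regular file under a root directory (hypothetical usage).
List<File> collected = new ArrayList<>();
innerListFiles(collected, new File("/some/root"), new FileFilter() {
    @Override
    public boolean accept(File pathname) {
        return true; // accept everything: directories get recursed, files get collected
    }
});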