Example usage for java.io.File.listFiles()

List of usage examples for java.io.File.listFiles()

Introduction

On this page you can find example usages of java.io.File.listFiles().

Prototype

public File[] listFiles() 

Document

Returns an array of abstract pathnames denoting the files in the directory denoted by this abstract pathname, or null if this abstract pathname does not denote a directory or if an I/O error occurs.
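
Because listFiles() returns null rather than throwing when the pathname is not a directory or an I/O error occurs, callers should null-check the result before iterating; several of the examples below omit this check. A minimal sketch of the safe pattern (the class name and the "documents" directory are illustrative placeholders):

import java.io.File;

public class ListFilesExample {
    public static void main(String[] args) {
        File dir = new File("documents"); // placeholder path; replace with a real directory

        // listFiles() returns null if 'dir' does not denote a directory
        // or if an I/O error occurs, so check before iterating.
        File[] files = dir.listFiles();
        if (files == null) {
            System.err.println(dir + " is not a readable directory.");
            return;
        }
        for (File file : files) {
            System.out.println((file.isDirectory() ? "[dir]  " : "[file] ") + file.getName());
        }
    }
}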

Usage

From source file:WriteIndex.java

/**
 * @param args
 */
public static void main(String[] args) throws IOException {

    File docs = new File("documents");
    File indexDir = new File(INDEX_DIRECTORY);

    Directory directory = FSDirectory.open(indexDir);

    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_35);
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_35, analyzer);
    IndexWriter writer = new IndexWriter(directory, conf);
    writer.deleteAll();

    for (File file : docs.listFiles()) {
        Metadata metadata = new Metadata();
        ContentHandler handler = new BodyContentHandler();
        ParseContext context = new ParseContext();
        Parser parser = new AutoDetectParser();
        InputStream stream = new FileInputStream(file);
        try {
            parser.parse(stream, handler, metadata, context);
        } catch (TikaException e) {
            e.printStackTrace();
        } catch (SAXException e) {
            e.printStackTrace();
        } finally {
            stream.close();
        }

        String text = handler.toString();
        String fileName = file.getName();

        Document doc = new Document();
        doc.add(new Field("file", fileName, Store.YES, Index.NO));

        for (String key : metadata.names()) {
            String name = key.toLowerCase();
            String value = metadata.get(key);

            if (StringUtils.isBlank(value)) {
                continue;
            }

            if ("keywords".equalsIgnoreCase(key)) {
                for (String keyword : value.split(",?(\\s+)")) {
                    doc.add(new Field(name, keyword, Store.YES, Index.NOT_ANALYZED));
                }
            } else if ("title".equalsIgnoreCase(key)) {
                doc.add(new Field(name, value, Store.YES, Index.ANALYZED));
            } else {
                doc.add(new Field(name, value, Store.YES, Index.NOT_ANALYZED));
            }
        }
        doc.add(new Field("text", text, Store.NO, Index.ANALYZED));
        writer.addDocument(doc);

    }

    writer.commit();
    writer.deleteUnusedFiles();

    System.out.println(writer.maxDoc() + " documents written");
}

From source file:DIA_Umpire_Quant.DIA_Umpire_LCMSIDGen.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws FileNotFoundException, IOException, Exception {
    System.out.println(
            "=================================================================================================");
    System.out.println("DIA-Umpire LCMSID geneartor (version: " + UmpireInfo.GetInstance().Version + ")");
    if (args.length != 1) {
        System.out.println(
                "command format error, the correct format should be: java -jar -Xmx10G DIA_Umpire_LCMSIDGen.jar diaumpire_module.params");
        return;
    }
    try {
        ConsoleLogger.SetConsoleLogger(Level.INFO);
        ConsoleLogger.SetFileLogger(Level.DEBUG,
                FilenameUtils.getFullPath(args[0]) + "diaumpire_lcmsidgen.log");
    } catch (Exception e) {
    }

    Logger.getRootLogger().info("Version: " + UmpireInfo.GetInstance().Version);
    Logger.getRootLogger().info("Parameter file:" + args[0]);

    BufferedReader reader = new BufferedReader(new FileReader(args[0]));
    String line = "";
    String WorkFolder = "";
    int NoCPUs = 2;

    TandemParam tandemPara = new TandemParam(DBSearchParam.SearchInstrumentType.TOF5600);
    HashMap<String, File> AssignFiles = new HashMap<>();

    //<editor-fold defaultstate="collapsed" desc="Reading parameter file">
    while ((line = reader.readLine()) != null) {
        line = line.trim();
        Logger.getRootLogger().info(line);
        if (!"".equals(line) && !line.startsWith("#")) {
            //System.out.println(line);
            if (line.equals("==File list begin")) {
                do {
                    line = reader.readLine();
                    line = line.trim();
                    if (line.equals("==File list end")) {
                        continue;
                    } else if (!"".equals(line)) {
                        File newfile = new File(line);
                        if (newfile.exists()) {
                            AssignFiles.put(newfile.getAbsolutePath(), newfile);
                        } else {
                            Logger.getRootLogger().info("File: " + newfile + " does not exist.");
                        }
                    }
                } while (!line.equals("==File list end"));
            }
            if (line.split("=").length < 2) {
                continue;
            }
            String type = line.split("=")[0].trim();
            String value = line.split("=")[1].trim();
            switch (type) {
            case "Path": {
                WorkFolder = value;
                break;
            }
            case "path": {
                WorkFolder = value;
                break;
            }
            case "Thread": {
                NoCPUs = Integer.parseInt(value);
                break;
            }
            case "DecoyPrefix": {
                if (!"".equals(value)) {
                    tandemPara.DecoyPrefix = value;
                }
                break;
            }
            case "PeptideFDR": {
                tandemPara.PepFDR = Float.parseFloat(value);
                break;
            }
            }
        }
    }
    //</editor-fold>

    //Initialize PTM manager using compomics library
    PTMManager.GetInstance();

    //Generate DIA file list
    ArrayList<DIAPack> FileList = new ArrayList<>();

    File folder = new File(WorkFolder);
    if (!folder.exists()) {
        Logger.getRootLogger().info("The path : " + WorkFolder + " cannot be found.");
        System.exit(1);
    }
    for (final File fileEntry : folder.listFiles()) {
        if (fileEntry.isFile()
                && (fileEntry.getAbsolutePath().toLowerCase().endsWith(".mzxml")
                        | fileEntry.getAbsolutePath().toLowerCase().endsWith(".mzml"))
                && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q1.mzxml")
                && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q2.mzxml")
                && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q3.mzxml")) {
            AssignFiles.put(fileEntry.getAbsolutePath(), fileEntry);
        }
        if (fileEntry.isDirectory()) {
            for (final File fileEntry2 : fileEntry.listFiles()) {
                if (fileEntry2.isFile()
                        && (fileEntry2.getAbsolutePath().toLowerCase().endsWith(".mzxml")
                                | fileEntry2.getAbsolutePath().toLowerCase().endsWith(".mzml"))
                        && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q1.mzxml")
                        && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q2.mzxml")
                        && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q3.mzxml")) {
                    AssignFiles.put(fileEntry2.getAbsolutePath(), fileEntry2);
                }
            }
        }
    }

    Logger.getRootLogger().info("No. of files assigned :" + AssignFiles.size());
    for (File fileEntry : AssignFiles.values()) {
        Logger.getRootLogger().info(fileEntry.getAbsolutePath());
    }

    //process each DIA file to generate untargeted identifications
    for (File fileEntry : AssignFiles.values()) {
        String mzXMLFile = fileEntry.getAbsolutePath();
        if (mzXMLFile.toLowerCase().endsWith(".mzxml") | mzXMLFile.toLowerCase().endsWith(".mzml")) {
            long time = System.currentTimeMillis();

            DIAPack DiaFile = new DIAPack(mzXMLFile, NoCPUs);
            FileList.add(DiaFile);
            Logger.getRootLogger().info(
                    "=================================================================================================");
            Logger.getRootLogger().info("Processing " + mzXMLFile);
            if (!DiaFile.LoadDIASetting()) {
                Logger.getRootLogger().info("Loading DIA setting failed, job is incomplete");
                System.exit(1);
            }
            if (!DiaFile.LoadParams()) {
                Logger.getRootLogger().info("Loading parameters failed, job is incomplete");
                System.exit(1);
            }
            Logger.getRootLogger().info("Loading identification results " + mzXMLFile + "....");

            DiaFile.ParsePepXML(tandemPara, null);
            DiaFile.BuildStructure();
            if (!DiaFile.MS1FeatureMap.ReadPeakCluster()) {
                Logger.getRootLogger().info("Loading peak and structure failed, job is incomplete");
                System.exit(1);
            }
            DiaFile.MS1FeatureMap.ClearMonoisotopicPeakOfCluster();
            //Generate mapping between index of precursor feature and pseudo MS/MS scan index 
            DiaFile.GenerateClusterScanNomapping();
            //Doing quantification
            DiaFile.AssignQuant();
            DiaFile.ClearStructure();

            DiaFile.IDsummary.ReduceMemoryUsage();
            time = System.currentTimeMillis() - time;
            Logger.getRootLogger().info(mzXMLFile + " processed time:"
                    + String.format("%d hour, %d min, %d sec", TimeUnit.MILLISECONDS.toHours(time),
                            TimeUnit.MILLISECONDS.toMinutes(time)
                                    - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(time)),
                            TimeUnit.MILLISECONDS.toSeconds(time)
                                    - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(time))));
        }
        Logger.getRootLogger().info("Job done");
        Logger.getRootLogger().info(
                "=================================================================================================");
    }
}

From source file:de.citec.sc.matoll.process.Matoll_CreateMax.java

public static void main(String[] args) throws IOException, ParserConfigurationException, SAXException,
        InstantiationException, IllegalAccessException, ClassNotFoundException, Exception {

    String directory;
    String gold_standard_lexicon;
    String output_lexicon;
    String configFile;
    Language language;
    String output;

    Stopwords stopwords = new Stopwords();

    HashMap<String, Double> maxima;
    maxima = new HashMap<String, Double>();

    if (args.length < 3) {
        System.out.print("Usage: Matoll --mode=train/test <DIRECTORY> <CONFIG>\n");
        return;

    }

    //      Classifier classifier;

    directory = args[1];
    configFile = args[2];

    final Config config = new Config();

    config.loadFromFile(configFile);

    gold_standard_lexicon = config.getGoldStandardLexicon();

    String model_file = config.getModel();

    output_lexicon = config.getOutputLexicon();
    output = config.getOutput();

    language = config.getLanguage();

    LexiconLoader loader = new LexiconLoader();
    Lexicon gold = loader.loadFromFile(gold_standard_lexicon);

    Set<String> uris = new HashSet<>();
    //        Map<Integer,String> sentence_list = new HashMap<>();
    Map<Integer, Set<Integer>> mapping_words_sentences = new HashMap<>();

    //consider only properties
    for (LexicalEntry entry : gold.getEntries()) {
        try {
            for (Sense sense : entry.getSenseBehaviours().keySet()) {
                String tmp_uri = sense.getReference().getURI().replace("http://dbpedia.org/ontology/", "");
                if (!Character.isUpperCase(tmp_uri.charAt(0))) {
                    uris.add(sense.getReference().getURI());
                }
            }
        } catch (Exception e) {
        }
    }

    ModelPreprocessor preprocessor = new ModelPreprocessor(language);
    preprocessor.setCoreferenceResolution(false);
    Set<String> dep = new HashSet<>();
    dep.add("prep");
    dep.add("appos");
    dep.add("nn");
    dep.add("dobj");
    dep.add("pobj");
    dep.add("num");
    preprocessor.setDEP(dep);

    List<File> list_files = new ArrayList<>();

    if (config.getFiles().isEmpty()) {
        File folder = new File(directory);
        File[] files = folder.listFiles();
        for (File file : files) {
            if (file.toString().contains(".ttl"))
                list_files.add(file);
        }
    } else {
        list_files.addAll(config.getFiles());
    }
    System.out.println(list_files.size());

    int sentence_counter = 0;
    Map<String, Set<Integer>> bag_words_uri = new HashMap<>();
    Map<String, Integer> mapping_word_id = new HashMap<>();
    for (File file : list_files) {
        Model model = RDFDataMgr.loadModel(file.toString());
        for (Model sentence : getSentences(model)) {
            String reference = getReference(sentence);
            reference = reference.replace("http://dbpedia/", "http://dbpedia.org/");
            if (uris.contains(reference)) {
                sentence_counter += 1;
                Set<Integer> words_ids = getBagOfWords(sentence, stopwords, mapping_word_id);
                //TODO: add sentence preprocessing
                String obj = getObject(sentence);
                String subj = getSubject(sentence);
                preprocessor.preprocess(sentence, subj, obj, language);
                //TODO: also return marker if object or subject of property (in SPARQL this has to be optional of course)
                String parsed_sentence = getParsedSentence(sentence);
                try (FileWriter fw = new FileWriter("mapping_sentences_to_ids_goldstandard.tsv", true);
                        BufferedWriter bw = new BufferedWriter(fw);
                        PrintWriter out = new PrintWriter(bw)) {
                    out.println(sentence_counter + "\t" + parsed_sentence);
                } catch (IOException e) {
                    e.printStackTrace();
                }
                for (Integer word_id : words_ids) {
                    if (mapping_words_sentences.containsKey(word_id)) {
                        Set<Integer> tmp_set = mapping_words_sentences.get(word_id);
                        tmp_set.add(sentence_counter);
                        mapping_words_sentences.put(word_id, tmp_set);

                    } else {
                        Set<Integer> tmp_set = new HashSet<>();
                        tmp_set.add(sentence_counter);
                        mapping_words_sentences.put(word_id, tmp_set);
                    }

                }
                if (bag_words_uri.containsKey(reference)) {
                    Set<Integer> tmp = bag_words_uri.get(reference);
                    for (Integer w : words_ids) {
                        tmp.add(w);

                    }
                    bag_words_uri.put(reference, tmp);
                } else {
                    Set<Integer> tmp = new HashSet<>();
                    for (Integer w : words_ids) {
                        tmp.add(w);
                    }
                    bag_words_uri.put(reference, tmp);
                }
            }

        }
        model.close();

    }

    PrintWriter writer = new PrintWriter("bag_of_words_only_goldstandard.tsv");
    StringBuilder string_builder = new StringBuilder();
    for (String r : bag_words_uri.keySet()) {
        string_builder.append(r);
        for (Integer i : bag_words_uri.get(r)) {
            string_builder.append("\t");
            string_builder.append(i);
        }
        string_builder.append("\n");
    }
    writer.write(string_builder.toString());
    writer.close();

    writer = new PrintWriter("mapping_words_to_sentenceids_goldstandard.tsv");
    string_builder = new StringBuilder();
    for (Integer w : mapping_words_sentences.keySet()) {
        string_builder.append(w);
        for (int i : mapping_words_sentences.get(w)) {
            string_builder.append("\t");
            string_builder.append(i);
        }
        string_builder.append("\n");
    }
    writer.write(string_builder.toString());
    writer.close();

}

From source file:CSV_ReportingConsolidator.java

public static void main(String[] args) throws IOException {

    // Construct an array containing the list of files in the input folder
    String inputPath = "input/"; // Set the directory containing the CSV files
    String outputPath = "output/"; // Set the output directory for the consolidated report
    String outputFile = "Consolidated_CSV_Report.csv";
    File folder = new File(inputPath); // Load the selected path
    File[] listOfFiles = folder.listFiles(); // Retrieve the list of files from the directory

    // Serialize the reference headers to write the output CSV header
    CSVReader referenceReader = new CSVReader(new FileReader("reference/example_fields.csv"));
    String[] referenceHeaders = referenceReader.readNext();
    CSVWriter writer = new CSVWriter(new FileWriter(outputPath + outputFile), ',',
            CSVWriter.NO_QUOTE_CHARACTER);

    System.out.println("-- CSV parser initiated, found " + listOfFiles.length + " input files.\n");

    for (int i = 0; i < listOfFiles.length; i++) {
        if (listOfFiles[i].isFile()) {
            String filename = listOfFiles[i].getName(); // Retrieve the file name

            if (!filename.endsWith("csv")) { // Check if the file has a CSV extension
                System.out.println("EE | Fatal error: The input path contains non-csv files: " + filename
                        + ".\n Please remove them and try again.");
                writer.close();
                System.exit(1); // Exit if non-CSV files are found
            }

            String filePath = String.valueOf(inputPath + filename); // Combine the path with the filename
            File file = new File(filePath);
            CSVReader csvFile = new CSVReader(new FileReader(filePath));
            String[] nextLine; // CSV line data container
            int rowIterator = 0; // Used to loop between rows
            int colIterator = 0; // Used to loop between columns
            int rowCount = 0; // Used to count the total number of rows
            int pageCount = 0;
            int f = 0;

            String[] pageName = new String[100]; // Holder for Page names
            double[] individualPRT = new double[100]; // Holder for Page Response Times
            String PTrun = ""; // Name of Performance Test Run
            String startTime = ""; // Test start time

            double PRT = 0; // Average Page Response Time
            double PRd = 0; // Page Response Time Standard Deviation
            double ERT = 0; // Average Element Response Time
            double ERd = 0; // Element Response Time Standard Deviation
            double MRT = 0; // Maximum Page Response Time
            double mRT = 0; // Minimum Page Response Time
            int elapsedTime = 0; // Test Elapsed Time
            int completedUsers = 0; // Number of Completed Users
            int TPA = 0; // Total Page Attempts
            int TPH = 0; // Total Page Hits
            int TEA = 0; // Total Element Attempts
            int TEH = 0; // Total Element Hits

            // Fetch the total row count:
            FileReader fr = new FileReader(file);
            LineNumberReader ln = new LineNumberReader(fr);
            while (ln.readLine() != null) {
                rowCount++;
            }
            ln.close(); // Close the file reader

            // Fetch test identification data:
            nextLine = csvFile.readNext();
            PTrun = nextLine[1]; // Name of Performance Test Run
            nextLine = csvFile.readNext();
            startTime = nextLine[1]; // Performance Test Start Time

            // Skip 9 uninteresting rows:
            while (rowIterator < 9) {
                nextLine = csvFile.readNext();
                rowIterator++;
            }

            // Check if there are VP fails (adds another column)
            if (nextLine[9].equals("Total Page VPs Error For Run")) {
                f = 2;
            } else if (nextLine[8].equals("Total Page VPs Failed For Run")
                    || nextLine[8].equals("Total Page VPs Error For Run")) {
                f = 1;
            } else {
                f = 0;
            }

            // Read the page titles:
            while (colIterator != -1) {
                pageName[colIterator] = nextLine[colIterator + 18 + f];
                if ((pageName[colIterator].equals(pageName[0])) && colIterator > 0) {
                    pageCount = colIterator;
                    pageName[colIterator] = null;
                    colIterator = -1; // Detects when the page titles start to repeat
                } else {
                    colIterator++;
                }
            }

            // Retrieve non-continuous performance data, auto-detect gaps, auto-convert to seconds where needed
            nextLine = csvFile.readNext();
            nextLine = csvFile.readNext();
            while (rowIterator < rowCount - 3) {
                if (nextLine.length > 1) {
                    if (nextLine[0].length() != 0) {
                        elapsedTime = Integer.parseInt(nextLine[0]) / 1000;
                    }
                }
                if (nextLine.length > 5) {
                    if (nextLine[5].length() != 0) {
                        completedUsers = Integer.parseInt(nextLine[5]);
                    }
                }
                if (nextLine.length > 8 + f) {
                    if (nextLine[8 + f].length() != 0) {
                        TPA = (int) Double.parseDouble(nextLine[8 + f]);
                    }
                }
                if (nextLine.length > 9 + f) {
                    if (nextLine[9 + f].length() != 0) {
                        TPH = (int) Double.parseDouble(nextLine[9 + f]);
                    }
                }
                if (nextLine.length > 14 + f) {
                    if (nextLine[14 + f].length() != 0) {
                        TEA = (int) Double.parseDouble(nextLine[14 + f]);
                    }
                }
                if (nextLine.length > 15 + f) {
                    if (nextLine[15 + f].length() != 0) {
                        TEH = (int) Double.parseDouble(nextLine[15 + f]);
                    }
                }
                if (nextLine.length > 10 + f) {
                    if (nextLine[10 + f].length() != 0) {
                        PRT = Double.parseDouble(nextLine[10 + f]) / 1000;
                    }
                }
                if (nextLine.length > 11 + f) {
                    if (nextLine[11 + f].length() != 0) {
                        PRd = Double.parseDouble(nextLine[11 + f]) / 1000;
                    }
                }
                if (nextLine.length > 16 + f) {
                    if (nextLine[16 + f].length() != 0) {
                        ERT = Double.parseDouble(nextLine[16 + f]) / 1000;
                    }
                }
                if (nextLine.length > 17 + f) {
                    if (nextLine[17 + f].length() != 0) {
                        ERd = Double.parseDouble(nextLine[17 + f]) / 1000;
                    }
                }
                if (nextLine.length > 12 + f) {
                    if (nextLine[12 + f].length() != 0) {
                        MRT = Double.parseDouble(nextLine[12 + f]) / 1000;
                    }
                }
                if (nextLine.length > 13 + f) {
                    if (nextLine[13 + f].length() != 0) {
                        mRT = Double.parseDouble(nextLine[13 + f]) / 1000;
                    }
                }

                nextLine = csvFile.readNext();
                rowIterator++;
            }

            // Convert the elapsed time from seconds to HH:MM:SS format
            int hours = elapsedTime / 3600, remainder = elapsedTime % 3600, minutes = remainder / 60,
                    seconds = remainder % 60;
            String eTime = (hours < 10 ? "0" : "") + hours + ":" + (minutes < 10 ? "0" : "") + minutes + ":"
                    + (seconds < 10 ? "0" : "") + seconds;

            csvFile.close(); // File recycled to reset the line parser
            CSVReader csvFile2 = new CSVReader(new FileReader(filePath));

            // Reset iterators to allow re-usage:
            rowIterator = 0;
            colIterator = 0;

            // Skip first 13 rows:
            while (rowIterator < 13) {
                nextLine = csvFile2.readNext();
                rowIterator++;
            }

            // Dynamically retrieve individual page response times in seconds, correlate with page names:
            while (rowIterator < rowCount) {
                while (colIterator < pageCount) {
                    if (nextLine.length > 18 + f) {
                        if (nextLine[colIterator + 18 + f].length() != 0) {
                            individualPRT[colIterator] = Double.parseDouble(nextLine[colIterator + 18 + f])
                                    / 1000;
                        }
                    }
                    colIterator++;
                }
                nextLine = csvFile2.readNext();
                rowIterator++;
                colIterator = 0;
            }

            csvFile2.close(); // Final file closing

            // Reset iterators to allow re-usage:
            rowIterator = 0;
            colIterator = 0;

            // Display statistics in console, enable only for debugging purposes:
            /*
            System.out.println(" Elapsed Time: " + elapsedTime
                   + "\n Completed Users: " + completedUsers
                   + "\n Total Page Attempts: " + TPA
                   + "\n Total Page Hits: " + TPH
                   + "\n Average Response Time For All Pages For Run: " + PRT
                   + "\n Response Time Standard Deviation For All Pages For Run: " + PRd
                   + "\n Maximum Response Time For All Pages For Run: " + MRT
                   + "\n Minimum Response Time For All Pages For Run: " + mRT
                   + "\n Total Page Element Attempts: " + TEA
                   + "\n Total Page Element Hits: " + TEH
                   + "\n Average Response Time For All Page Elements For Run: " + ERT
                   + "\n Response Time Standard Deviation For All Page Elements For Run: " + ERd
                   + "\n");
                    
            // Display individual page response times in console:
            while (colIterator < 9)   {
               System.out.println("Page " + Page[colIterator] + " - Response Time: " + PagePRT[colIterator]);
               colIterator++;
            }
            */

            // Serialize individual Page Response Times into CSV values
            StringBuffer individualPRTList = new StringBuffer();
            if (individualPRT.length > 0) {
                individualPRTList.append(String.valueOf(individualPRT[0]));
                for (int k = 1; k < pageCount; k++) {
                    individualPRTList.append(",");
                    individualPRTList.append(String.valueOf(individualPRT[k]));
                }
            }

            // Serialize all retrieved performance parameters:
            String[] entries = { PTrun, startTime, String.valueOf(completedUsers), eTime, String.valueOf(TPA),
                    String.valueOf(TPH), String.valueOf(PRT), String.valueOf(PRd), String.valueOf(MRT),
                    String.valueOf(mRT), String.valueOf(TEA), String.valueOf(TEH), String.valueOf(ERT),
                    String.valueOf(ERd), "", individualPRTList.toString(), };

            // Define header and write it to the first CSV row
            Object[] headerConcatenator = ArrayUtils.addAll(referenceHeaders, pageName);
            String[] header = new String[referenceHeaders.length + pageCount];
            header = Arrays.copyOf(headerConcatenator, header.length, String[].class);

            if (i == 0) {
                writer.writeNext(header); // Write CSV header
            }
            writer.writeNext(entries); // Write performance parameters in CSV format
            System.out.println("== Processed: " + filename + " ===========================");
        }
    }
    writer.close(); // Close the CSV writer
    System.out.println("\n-- Done processing " + listOfFiles.length + " files."
            + "\n-- The consolidated report has been saved to " + outputPath + outputFile);
}

From source file:CSV_ReportingConsolidator.java

public static void main(String[] args) throws IOException {

    // Construct an array containing the list of files in the input folder
    String inputPath = "input/"; // Set the directory containing the CSV files
    String outputPath = "output/"; // Set the output directory for the consolidated report
    String outputFile = "Consolidated_CSV_Report.csv";
    File folder = new File(inputPath); // Load the selected path
    File[] listOfFiles = folder.listFiles(); // Retrieve the list of files from the directory

    // Serialize the reference headers to write the output CSV header
    CSVReader referenceReader = new CSVReader(new FileReader("reference/example_fields.csv"));
    String[] referenceHeaders = referenceReader.readNext();
    CSVWriter writer = new CSVWriter(new FileWriter(outputPath + outputFile), ',',
            CSVWriter.NO_QUOTE_CHARACTER);

    System.out.println("-- CSV parser initiated, found " + listOfFiles.length + " input files.\n");

    for (int i = 0; i < listOfFiles.length; i++) {
        if (listOfFiles[i].isFile()) {
            String filename = listOfFiles[i].getName(); // Retrieve the file name

            if (!filename.endsWith("csv")) { // Check if the file has a CSV extension
                System.out.println("EE | Fatal error: The input path contains non-csv files: " + filename
                        + ".\n Please remove them and try again.");
                writer.close();
                System.exit(1); // Exit if non-CSV files are found
            }

            String filePath = String.valueOf(inputPath + filename); // Combine the path with the filename
            File file = new File(filePath);
            CSVReader csvFile = new CSVReader(new FileReader(filePath));
            String[] nextLine; // CSV line data container
            int rowIterator = 0; // Used to loop between rows
            int colIterator = 0; // Used to loop between columns
            int rowCount = 0; // Used to count the total number of rows
            int pageCount = 0;
            int f = 0;

            String[] pageName = new String[100]; // Holder for Page names
            double[] individualPRT = new double[100]; // Holder for Page Response Times
            String PTrun = ""; // Name of Performance Test Run
            String startTime = ""; // Test start time

            double PRT = 0; // Average Page Response Time
            double PRd = 0; // Page Response Time Standard Deviation
            double ERT = 0; // Average Element Response Time
            double ERd = 0; // Element Response Time Standard Deviation
            double MRT = 0; // Maximum Page Response Time
            double mRT = 0; // Minimum Page Response Time
            int elapsedTime = 0; // Test Elapsed Time
            int completedUsers = 0; // Number of Completed Users
            int TPA = 0; // Total Page Attempts
            int TPH = 0; // Total Page Hits
            int TEA = 0; // Total Element Attempts
            int TEH = 0; // Total Element Hits

            // Fetch the total row count:
            FileReader fr = new FileReader(file);
            LineNumberReader ln = new LineNumberReader(fr);
            while (ln.readLine() != null) {
                rowCount++;
            }
            ln.close(); // Close the file reader

            // Fetch test identification data:
            nextLine = csvFile.readNext();
            PTrun = nextLine[1]; // Name of Performance Test Run
            nextLine = csvFile.readNext();
            startTime = nextLine[1]; // Performance Test Start Time

            // Skip 9 uninteresting rows:
            while (rowIterator < 9) {
                nextLine = csvFile.readNext();
                rowIterator++;
            }

            // Check if there are VP fails (adds another column)
            if (nextLine[9].equals("Total Page VPs Error For Run")) {
                f = 2;
            } else if (nextLine[8].equals("Total Page VPs Failed For Run")
                    || nextLine[8].equals("Total Page VPs Error For Run")) {
                f = 1;
            } else {
                f = 0;
            }

            // Read the page titles:
            while (colIterator != -1) {
                pageName[colIterator] = nextLine[colIterator + 16 + f];
                if ((pageName[colIterator].equals(pageName[0])) && colIterator > 0) {
                    pageCount = colIterator;
                    pageName[colIterator] = null;
                    colIterator = -1; // Detects when the page titles start to repeat
                } else {
                    colIterator++;
                }
            }

            // Retrieve non-continuous performance data, auto-detect gaps, auto-convert to seconds where needed
            nextLine = csvFile.readNext();
            nextLine = csvFile.readNext();
            while (rowIterator < rowCount - 3) {
                if (nextLine.length > 1) {
                    if (nextLine[0].length() != 0) {
                        elapsedTime = Integer.parseInt(nextLine[0]) / 1000;
                    }
                }
                if (nextLine.length > 4) {
                    if (nextLine[4].length() != 0) {
                        completedUsers = Integer.parseInt(nextLine[4]);
                    }
                }
                if (nextLine.length > 6 + f) {
                    if (nextLine[6 + f].length() != 0) {
                        TPA = (int) Double.parseDouble(nextLine[6 + f]);
                    }
                }
                if (nextLine.length > 7 + f) {
                    if (nextLine[7 + f].length() != 0) {
                        TPH = (int) Double.parseDouble(nextLine[7 + f]);
                    }
                }
                if (nextLine.length > 12 + f) {
                    if (nextLine[12 + f].length() != 0) {
                        TEA = (int) Double.parseDouble(nextLine[12 + f]);
                    }
                }
                if (nextLine.length > 13 + f) {
                    if (nextLine[13 + f].length() != 0) {
                        TEH = (int) Double.parseDouble(nextLine[13 + f]);
                    }
                }
                if (nextLine.length > 8 + f) {
                    if (nextLine[8 + f].length() != 0) {
                        PRT = Double.parseDouble(nextLine[8 + f]) / 1000;
                    }
                }
                if (nextLine.length > 9 + f) {
                    if (nextLine[9 + f].length() != 0) {
                        PRd = Double.parseDouble(nextLine[9 + f]) / 1000;
                    }
                }
                if (nextLine.length > 14 + f) {
                    if (nextLine[14 + f].length() != 0) {
                        ERT = Double.parseDouble(nextLine[14 + f]) / 1000;
                    }
                }
                if (nextLine.length > 15 + f) {
                    if (nextLine[15 + f].length() != 0) {
                        ERd = Double.parseDouble(nextLine[15 + f]) / 1000;
                    }
                }
                if (nextLine.length > 10 + f) {
                    if (nextLine[10 + f].length() != 0) {
                        MRT = Double.parseDouble(nextLine[10 + f]) / 1000;
                    }
                }
                if (nextLine.length > 11 + f) {
                    if (nextLine[11 + f].length() != 0) {
                        mRT = Double.parseDouble(nextLine[11 + f]) / 1000;
                    }
                }

                nextLine = csvFile.readNext();
                rowIterator++;
            }

            // Convert the elapsed time from seconds to HH:MM:SS format
            int hours = elapsedTime / 3600, remainder = elapsedTime % 3600, minutes = remainder / 60,
                    seconds = remainder % 60;
            String eTime = (hours < 10 ? "0" : "") + hours + ":" + (minutes < 10 ? "0" : "") + minutes + ":"
                    + (seconds < 10 ? "0" : "") + seconds;

            csvFile.close(); // File recycled to reset the line parser
            CSVReader csvFile2 = new CSVReader(new FileReader(filePath));

            // Reset iterators to allow re-usage:
            rowIterator = 0;
            colIterator = 0;

            // Skip first 13 rows:
            while (rowIterator < 13) {
                nextLine = csvFile2.readNext();
                rowIterator++;
            }

            // Dynamically retrieve individual page response times in seconds, correlate with page names:
            while (rowIterator < rowCount) {
                while (colIterator < pageCount) {
                    if (nextLine.length > 16 + f) {
                        if (nextLine[colIterator + 16 + f].length() != 0) {
                            individualPRT[colIterator] = Double.parseDouble(nextLine[colIterator + 16 + f])
                                    / 1000;
                        }
                    }
                    colIterator++;
                }
                nextLine = csvFile2.readNext();
                rowIterator++;
                colIterator = 0;
            }

            csvFile2.close(); // Final file closing

            // Reset iterators to allow re-usage:
            rowIterator = 0;
            colIterator = 0;

            // Display statistics in console, enable only for debugging purposes:
            /*
            System.out.println(" Elapsed Time: " + elapsedTime
                   + "\n Completed Users: " + completedUsers
                   + "\n Total Page Attempts: " + TPA
                   + "\n Total Page Hits: " + TPH
                   + "\n Average Response Time For All Pages For Run: " + PRT
                   + "\n Response Time Standard Deviation For All Pages For Run: " + PRd
                   + "\n Maximum Response Time For All Pages For Run: " + MRT
                   + "\n Minimum Response Time For All Pages For Run: " + mRT
                   + "\n Total Page Element Attempts: " + TEA
                   + "\n Total Page Element Hits: " + TEH
                   + "\n Average Response Time For All Page Elements For Run: " + ERT
                   + "\n Response Time Standard Deviation For All Page Elements For Run: " + ERd
                   + "\n");
                    
            // Display individual page response times in console:
            while (colIterator < 9)   {
               System.out.println("Page " + Page[colIterator] + " - Response Time: " + PagePRT[colIterator]);
               colIterator++;
            }
            */

            // Serialize individual Page Response Times into CSV values
            StringBuffer individualPRTList = new StringBuffer();
            if (individualPRT.length > 0) {
                individualPRTList.append(String.valueOf(individualPRT[0]));
                for (int k = 1; k < pageCount; k++) {
                    individualPRTList.append(",");
                    individualPRTList.append(String.valueOf(individualPRT[k]));
                }
            }

            // Serialize all retrieved performance parameters:
            String[] entries = { PTrun, startTime, String.valueOf(completedUsers), eTime, String.valueOf(TPA),
                    String.valueOf(TPH), String.valueOf(PRT), String.valueOf(PRd), String.valueOf(MRT),
                    String.valueOf(mRT), String.valueOf(TEA), String.valueOf(TEH), String.valueOf(ERT),
                    String.valueOf(ERd), "", individualPRTList.toString(), };

            // Define header and write it to the first CSV row
            Object[] headerConcatenator = ArrayUtils.addAll(referenceHeaders, pageName);
            String[] header = new String[referenceHeaders.length + pageCount];
            header = Arrays.copyOf(headerConcatenator, header.length, String[].class);

            if (i == 0) {
                writer.writeNext(header); // Write CSV header
            }
            writer.writeNext(entries); // Write performance parameters in CSV format
            System.out.println("== Processed: " + filename + " ===========================");
        }
    }
    writer.close(); // Close the CSV writer
    System.out.println("\n-- Done processing " + listOfFiles.length + " files."
            + "\n-- The consolidated report has been saved to " + outputPath + outputFile);
}

From source file:com.opendesign.utils.ThumbnailManager.java

public static void main(String[] args) {

    //      //product thumbnails
    //      saveThumbDesignWorkMedium(thumbnailPath);
    //      saveThumbDesignWorkLarge(thumbnailPath);
    //
    //      //project thumbnails
    //      saveThumbDesignWorkMedium(thumbnailPath);
    //      saveThumbProjectWorkMedium(thumbnailPath);
    //
    //      //work thumbnails
    //      saveThumbProjectWorkSmall(thumbnailPath);
    //      saveThumbProjectWorkLarge(thumbnailPath);

    File productFolder = new File(
            "/Users/windfall/Desktop/00_working/02_?/02_01_OpenDesign/02_01_100_temp/km_upload/product");

    int updated = 0;
    int total = 0;
    for (File image : productFolder.listFiles()) {
        if (image != null) {

            System.out.println("resizing " + image.getAbsolutePath());
            saveThumbDesignWorkMedium(image.getAbsolutePath());
            saveThumbDesignWorkLarge(image.getAbsolutePath());
            updated++;

        } else {
            System.out.println("Fail=" + image);
        }
        total++;
    }

    System.out.println("Product Files=[" + total + "] updated=[" + updated + "]");

    updated = 0;
    total = 0;
    File projectFolder = new File(
            "/Users/windfall/Desktop/00_working/02_?/02_01_OpenDesign/02_01_100_temp/km_upload/project");

    for (File image : projectFolder.listFiles()) {
        if (image != null) {

            System.out.println("resizing " + image.getAbsolutePath());
            saveThumbDesignWorkMedium(image.getAbsolutePath());
            saveThumbProjectWorkMedium(image.getAbsolutePath());
            updated++;

        } else {
            System.out.println("Fail=" + image);
        }
        total++;
    }

    System.out.println("Project Files=[" + total + "] updated=[" + updated + "]");

    updated = 0;
    total = 0;

    File workFolder = new File(
            "/Users/windfall/Desktop/00_working/02_?/02_01_OpenDesign/02_01_100_temp/km_upload/project_work_file");

    for (File image : workFolder.listFiles()) {
        if (image != null) {

            System.out.println("resizing " + image.getAbsolutePath());
            saveThumbProjectWorkSmall(image.getAbsolutePath());
            saveThumbProjectWorkLarge(image.getAbsolutePath());
            updated++;

        } else {
            System.out.println("Fail=" + image);
        }
        total++;
    }

    System.out.println("Work Files=[" + total + "] updated=[" + updated + "]");

}

From source file:iaj.linkit.App.java

/**
 * Application entry method.
 *
 * @param args Command line arguments.
 */
public static void main(final String[] args) {
    final Configuration config = new Configuration();
    final CmdLineParser parser = new CmdLineParser(config);

    try {
        parser.parseArgument(args);

        final File f = (config.getPaths().size() > 0) ? new File(config.getPaths().get(0)).getCanonicalFile()
                : new File(DIR_CURRENT).getCanonicalFile();
        if (!f.exists()) {
            System.err.println("No such path: " + f.getCanonicalFile());
            System.exit(1);
        } else if (!f.canRead()) {
            System.err.println("Can't read path.");
            System.exit(1);
        } else if (f.isDirectory()) {
            handleFiles(config.isRecursive(), f.listFiles());
        } else {
            handleFiles(config.isRecursive(), f);
        }

    } catch (IOException e) {
        e.printStackTrace();
        System.exit(1);
    } catch (final CmdLineException e) {
        System.err.println(e.getMessage());
        System.err.println("lu [options...] [paths...]"); //$NON-NLS-1$
        parser.printUsage(System.err);
        System.exit(1);
    }
}

From source file:esiptestbed.mudrod.ontology.process.LocalOntology.java

public static void main(String[] args) throws Exception {

    // boolean options
    Option helpOpt = new Option("h", "help", false, "show this help message");
    // argument options
    Option ontDirOpt = Option.builder(ONT_DIR).required(true).numberOfArgs(1).hasArg(true)
            .desc("A directory containing .owl files.").argName(ONT_DIR).build();

    // create the options
    Options options = new Options();
    options.addOption(helpOpt);
    options.addOption(ontDirOpt);

    String ontDir;
    CommandLineParser parser = new DefaultParser();
    try {
        CommandLine line = parser.parse(options, args);

        if (line.hasOption(ONT_DIR)) {
            ontDir = line.getOptionValue(ONT_DIR).replace("\\", "/");
        } else {
            ontDir = LocalOntology.class.getClassLoader().getResource("ontology").getFile();
        }
        if (!ontDir.endsWith("/")) {
            ontDir += "/";
        }
    } catch (Exception e) {
        LOG.error("Error whilst processing main method of LocalOntology.", e);
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("LocalOntology: 'ontDir' argument is mandatory. ", options, true);
        return;
    }
    File fileDir = new File(ontDir);
    //Fail if the input is not a directory.
    if (fileDir.isDirectory()) {
        List<String> owlFiles = new ArrayList<>();
        for (File owlFile : fileDir.listFiles()) {
            owlFiles.add(owlFile.toString());
        }
        MudrodEngine mEngine = new MudrodEngine();
        Properties props = mEngine.loadConfig();
        Ontology ontology = new OntologyFactory(props).getOntology();
        //convert to correct input for ontology loading.
        String[] owlArray = new String[owlFiles.size()];
        owlArray = owlFiles.toArray(owlArray);
        ontology.load(owlArray);

        String[] terms = new String[] { "Glacier ice" };
        //Demonstrate that we can do basic ontology hierarchy navigation and log output.
        for (Iterator<OntClass> i = getParser().rootClasses(getModel()); i.hasNext();) {

            //print Ontology Class Hierarchy
            OntClass c = i.next();
            renderHierarchy(System.out, c, new LinkedList<>(), 0);

            for (Iterator<OntClass> subClass = c.listSubClasses(true); subClass.hasNext();) {
                OntClass sub = subClass.next();
                //This means that the search term is present as an OntClass
                if (terms[0].equalsIgnoreCase(sub.getLabel(null))) {
                    //Add the search term(s) above to the term cache.
                    for (int j = 0; j < terms.length; j++) {
                        addSearchTerm(terms[j], sub);
                    }

                    //Query the ontology and return subclasses of the search term(s)
                    for (int k = 0; k < terms.length; k++) {
                        Iterator<String> iter = ontology.subclasses(terms[k]);
                        while (iter.hasNext()) {
                            LOG.info("Subclasses >> " + iter.next());
                        }
                    }

                    //print any synonymic relationships to demonstrate that we can 
                    //undertake synonym-based query expansion
                    for (int l = 0; l < terms.length; l++) {
                        Iterator<String> iter = ontology.synonyms(terms[l]);
                        while (iter.hasNext()) {
                            LOG.info("Synonym >> " + iter.next());
                        }
                    }
                }
            }
        }

        mEngine.end();
    }

}

From source file:gov.nasa.jpl.mudrod.ontology.process.LocalOntology.java

public static void main(String[] args) throws Exception {

    // boolean options
    Option helpOpt = new Option("h", "help", false, "show this help message");
    // argument options
    Option ontDirOpt = OptionBuilder.hasArg(true).withArgName(ONT_DIR)
            .withDescription("A directory containing .owl files.").isRequired(false).create();

    // create the options
    Options options = new Options();
    options.addOption(helpOpt);
    options.addOption(ontDirOpt);

    String ontDir;
    CommandLineParser parser = new GnuParser();
    try {
        CommandLine line = parser.parse(options, args);

        if (line.hasOption(ONT_DIR)) {
            ontDir = line.getOptionValue(ONT_DIR).replace("\\", "/");
        } else {
            ontDir = LocalOntology.class.getClassLoader().getResource("ontology").getFile();
        }
        if (!ontDir.endsWith("/")) {
            ontDir += "/";
        }
    } catch (Exception e) {
        LOG.error("Error whilst processing main method of LocalOntology.", e);
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("LocalOntology: 'ontDir' argument is mandatory. ", options, true);
        return;
    }
    File fileDir = new File(ontDir);
    //Fail if the input is not a directory.
    if (fileDir.isDirectory()) {
        List<String> owlFiles = new ArrayList<>();
        for (File owlFile : fileDir.listFiles()) {
            owlFiles.add(owlFile.toString());
        }
        MudrodEngine mEngine = new MudrodEngine();
        Properties props = mEngine.loadConfig();
        Ontology ontology = new OntologyFactory(props).getOntology();
        //convert to correct input for ontology loading.
        String[] owlArray = new String[owlFiles.size()];
        owlArray = owlFiles.toArray(owlArray);
        ontology.load(owlArray);

        String[] terms = new String[] { "Glacier ice" };
        //Demonstrate that we can do basic ontology hierarchy navigation and log output.
        for (Iterator<OntClass> i = getParser().rootClasses(getModel()); i.hasNext();) {

            //print Ontology Class Hierarchy
            OntClass c = i.next();
            renderHierarchy(System.out, c, new LinkedList<>(), 0);

            for (Iterator<OntClass> subClass = c.listSubClasses(true); subClass.hasNext();) {
                OntClass sub = subClass.next();
                //This means that the search term is present as an OntClass
                if (terms[0].equalsIgnoreCase(sub.getLabel(null))) {
                    //Add the search term(s) above to the term cache.
                    for (int j = 0; j < terms.length; j++) {
                        addSearchTerm(terms[j], sub);
                    }

                    //Query the ontology and return subclasses of the search term(s)
                    for (int k = 0; k < terms.length; k++) {
                        Iterator<String> iter = ontology.subclasses(terms[k]);
                        while (iter.hasNext()) {
                            LOG.info("Subclasses >> " + iter.next());
                        }
                    }

                    //print any synonymic relationships to demonstrate that we can 
                    //undertake synonym-based query expansion
                    for (int l = 0; l < terms.length; l++) {
                        Iterator<String> iter = ontology.synonyms(terms[l]);
                        while (iter.hasNext()) {
                            LOG.info("Synonym >> " + iter.next());
                        }
                    }
                }
            }
        }

        mEngine.end();
    }

}

From source file:DIA_Umpire_Quant.DIA_Umpire_ExtLibSearch.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws FileNotFoundException, IOException, Exception {
    System.out.println(
            "=================================================================================================");
    System.out.println("DIA-Umpire targeted re-extraction analysis using external library (version: "
            + UmpireInfo.GetInstance().Version + ")");
    if (args.length != 1) {
        System.out.println(
                "command format error, the correct format should be: java -jar -Xmx10G DIA_Umpire_ExtLibSearch.jar diaumpire_module.params");
        return;
    }
    try {
        ConsoleLogger.SetConsoleLogger(Level.INFO);
        ConsoleLogger.SetFileLogger(Level.DEBUG,
                FilenameUtils.getFullPath(args[0]) + "diaumpire_extlibsearch.log");
    } catch (Exception e) {
    }

    Logger.getRootLogger().info("Version: " + UmpireInfo.GetInstance().Version);
    Logger.getRootLogger().info("Parameter file:" + args[0]);

    BufferedReader reader = new BufferedReader(new FileReader(args[0]));
    String line = "";
    String WorkFolder = "";
    int NoCPUs = 2;

    String ExternalLibPath = "";
    String ExternalLibDecoyTag = "DECOY";

    float ExtProbThreshold = 0.99f;
    float RTWindow_Ext = -1f;

    TandemParam tandemPara = new TandemParam(DBSearchParam.SearchInstrumentType.TOF5600);
    HashMap<String, File> AssignFiles = new HashMap<>();

    //<editor-fold defaultstate="collapsed" desc="Reading parameter file">
    while ((line = reader.readLine()) != null) {
        line = line.trim();
        Logger.getRootLogger().info(line);
        if (!"".equals(line) && !line.startsWith("#")) {
            //System.out.println(line);
            if (line.equals("==File list begin")) {
                do {
                    line = reader.readLine();
                    line = line.trim();
                    if (line.equals("==File list end")) {
                        continue;
                    } else if (!"".equals(line)) {
                        File newfile = new File(line);
                        if (newfile.exists()) {
                            AssignFiles.put(newfile.getAbsolutePath(), newfile);
                        } else {
                            Logger.getRootLogger().info("File: " + newfile + " does not exist.");
                        }
                    }
                } while (!line.equals("==File list end"));
            }
            if (line.split("=").length < 2) {
                continue;
            }
            String type = line.split("=")[0].trim();
            String value = line.split("=")[1].trim();
            switch (type) {

            case "Path": {
                WorkFolder = value;
                break;
            }
            case "path": {
                WorkFolder = value;
                break;
            }
            case "Thread": {
                NoCPUs = Integer.parseInt(value);
                break;
            }
            case "Fasta": {
                tandemPara.FastaPath = value;
                break;
            }
            case "DecoyPrefix": {
                if (!"".equals(value)) {
                    tandemPara.DecoyPrefix = value;
                }
                break;
            }
            case "ExternalLibPath": {
                ExternalLibPath = value;
                break;
            }
            case "ExtProbThreshold": {
                ExtProbThreshold = Float.parseFloat(value);
                break;
            }
            case "RTWindow_Ext": {
                RTWindow_Ext = Float.parseFloat(value);
                break;
            }
            case "ExternalLibDecoyTag": {
                ExternalLibDecoyTag = value;
                if (ExternalLibDecoyTag.endsWith("_")) {
                    ExternalLibDecoyTag = ExternalLibDecoyTag.substring(0, ExternalLibDecoyTag.length() - 1);
                }
                break;
            }
            }
        }
    }
    //</editor-fold>

    //Initialize PTM manager using compomics library
    PTMManager.GetInstance();

    //Check if the fasta file can be found
    if (!new File(tandemPara.FastaPath).exists()) {
        Logger.getRootLogger().info("Fasta file :" + tandemPara.FastaPath
                + " cannot be found, the process will be terminated, please check.");
        System.exit(1);
    }

    //Generate DIA file list
    ArrayList<DIAPack> FileList = new ArrayList<>();

    File folder = new File(WorkFolder);
    if (!folder.exists()) {
        Logger.getRootLogger().info("The path : " + WorkFolder + " cannot be found.");
        System.exit(1);
    }
    for (final File fileEntry : folder.listFiles()) {
        if (fileEntry.isFile()
                && (fileEntry.getAbsolutePath().toLowerCase().endsWith(".mzxml")
                        | fileEntry.getAbsolutePath().toLowerCase().endsWith(".mzml"))
                && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q1.mzxml")
                && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q2.mzxml")
                && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q3.mzxml")) {
            AssignFiles.put(fileEntry.getAbsolutePath(), fileEntry);
        }
        if (fileEntry.isDirectory()) {
            for (final File fileEntry2 : fileEntry.listFiles()) {
                if (fileEntry2.isFile()
                        && (fileEntry2.getAbsolutePath().toLowerCase().endsWith(".mzxml")
                                | fileEntry2.getAbsolutePath().toLowerCase().endsWith(".mzml"))
                        && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q1.mzxml")
                        && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q2.mzxml")
                        && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q3.mzxml")) {
                    AssignFiles.put(fileEntry2.getAbsolutePath(), fileEntry2);
                }
            }
        }
    }

    Logger.getRootLogger().info("No. of files assigned :" + AssignFiles.size());
    for (File fileEntry : AssignFiles.values()) {
        Logger.getRootLogger().info(fileEntry.getAbsolutePath());
    }

    for (File fileEntry : AssignFiles.values()) {
        String mzXMLFile = fileEntry.getAbsolutePath();
        if (mzXMLFile.toLowerCase().endsWith(".mzxml") | mzXMLFile.toLowerCase().endsWith(".mzml")) {
            DIAPack DiaFile = new DIAPack(mzXMLFile, NoCPUs);
            Logger.getRootLogger().info(
                    "=================================================================================================");
            Logger.getRootLogger().info("Processing " + mzXMLFile);
            if (!DiaFile.LoadDIASetting()) {
                Logger.getRootLogger().info("Loading DIA setting failed, job is incomplete");
                System.exit(1);
            }
            if (!DiaFile.LoadParams()) {
                Logger.getRootLogger().info("Loading parameters failed, job is incomplete");
                System.exit(1);
            }
            Logger.getRootLogger().info("Loading identification results " + mzXMLFile + "....");

            //If the serialization file for the ID results exists, load it
            if (DiaFile.ReadSerializedLCMSID()) {
                DiaFile.IDsummary.ReduceMemoryUsage();
                DiaFile.IDsummary.FastaPath = tandemPara.FastaPath;
                FileList.add(DiaFile);
            }
        }
    }

    //<editor-fold defaultstate="collapsed" desc="Targeted re-extraction using external library">

    //External library search

    Logger.getRootLogger().info("Targeted extraction using external library");

    //Read external library
    FragmentLibManager ExlibManager = FragmentLibManager.ReadFragmentLibSerialization(WorkFolder,
            FilenameUtils.getBaseName(ExternalLibPath));
    if (ExlibManager == null) {
        ExlibManager = new FragmentLibManager(FilenameUtils.getBaseName(ExternalLibPath));

        //Import traML file
        ExlibManager.ImportFragLibByTraML(ExternalLibPath, ExternalLibDecoyTag);
        //Check if there are decoy spectra
        ExlibManager.CheckDecoys();
        //ExlibManager.ImportFragLibBySPTXT(ExternalLibPath);
        ExlibManager.WriteFragmentLibSerialization(WorkFolder);
    }
    Logger.getRootLogger()
            .info("No. of peptide ions in external lib:" + ExlibManager.PeptideFragmentLib.size());
    for (DIAPack diafile : FileList) {
        if (diafile.IDsummary == null) {
            diafile.ReadSerializedLCMSID();
        }
        //Generate RT mapping
        RTMappingExtLib RTmap = new RTMappingExtLib(diafile.IDsummary, ExlibManager, diafile.GetParameter());
        RTmap.GenerateModel();
        RTmap.GenerateMappedPepIon();

        diafile.BuildStructure();
        diafile.MS1FeatureMap.ReadPeakCluster();
        diafile.GenerateMassCalibrationRTMap();
        //Perform targeted re-extraction
        diafile.TargetedExtractionQuant(false, ExlibManager, ExtProbThreshold, RTWindow_Ext);
        diafile.MS1FeatureMap.ClearAllPeaks();
        diafile.IDsummary.ReduceMemoryUsage();
        //Remove target IDs below the defined probability threshold
        diafile.IDsummary.RemoveLowProbMappedIon(ExtProbThreshold);
        diafile.ExportID();
        diafile.ClearStructure();
        Logger.getRootLogger().info("Peptide ions: " + diafile.IDsummary.GetPepIonList().size()
                + " Mapped ions: " + diafile.IDsummary.GetMappedPepIonList().size());
    }

    //</editor-fold>

    Logger.getRootLogger().info("Job done");
    Logger.getRootLogger().info(
            "=================================================================================================");

}