Example usage for java.util HashMap values

List of usage examples for java.util HashMap values

Introduction

This page collects example usages of java.util.HashMap.values() drawn from open-source projects.

Prototype

public Collection<V> values() 

Document

Returns a Collection view of the values contained in this map.
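
Before the project examples below, here is a minimal, self-contained sketch (the class name ValuesViewDemo is illustrative) of the one property of values() that the examples all rely on: the returned collection is a live view backed by the map, so later map updates show through it, removing from it removes the matching entry from the map, and it does not support add().

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;

public class ValuesViewDemo {
    public static void main(String[] args) {
        HashMap<String, Integer> map = new HashMap<>();
        map.put("a", 1);
        map.put("b", 2);

        // The collection is a view, not a copy: new entries appear in it.
        Collection<Integer> values = map.values();
        map.put("c", 3);
        System.out.println(values.size()); // 3

        // Removing a value through the view removes the entry from the map.
        values.remove(2);
        System.out.println(map.containsKey("b")); // false

        // To take an independent snapshot, copy the view into a new collection.
        List<Integer> snapshot = new ArrayList<>(map.values());
        map.clear();
        System.out.println(snapshot.size()); // still 2
    }
}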

Usage

From source file:de.tor.tribes.ui.views.DSWorkbenchReportFrame.java

private void fireRebuildStatsEvent() {
    List selection = jList1.getSelectedValuesList();
    if (selection == null || selection.isEmpty()) {
        jOverallStatsArea.setText("<html>Kein Stamm ausgewählt</html>");
        jAllyStatsArea.setText("<html>Kein Stamm ausgewählt</html>");
        jTribeStatsArea.setText("<html>Kein Stamm ausgewählt</html>");
        return;
    }
    int overallDefAllies = lastStats.getDefendingAllies().length;
    int overallDefTribes = lastStats.getDefendingTribes().length;

    NumberFormat f = NumberFormat.getInstance();
    f.setMinimumFractionDigits(0);
    f.setMaximumFractionDigits(0);

    StringBuilder allyBuffer = new StringBuilder();
    StringBuilder tribeBuffer = new StringBuilder();
    HashMap<Ally, AllyStatResult> allyResults = new HashMap<>();
    OverallStatResult overallResult = new OverallStatResult();
    for (Object o : selection) {
        Ally a = (Ally) o;
        AllyStatResult result = new AllyStatResult();
        allyResults.put(a, result);
        for (Tribe t : lastStats.getAttackingTribes(a)) {
            TribeStatResult tribeResult = new TribeStatResult();
            SingleAttackerStat stats = lastStats.getStatsForTribe(t);
            tribeResult.setTribeStats(stats, jGuessUnknownLosses.isSelected());
            result.addTribeStatResult(tribeResult);
        }
        overallResult.addAllyStatsResult(result);
    }
    overallResult.setStartDate(lastStats.getStartDate());
    overallResult.setEndDate(lastStats.getEndDate());
    overallResult.setReportCount(lastStats.getReportCount());
    overallResult.setAttackerAllies(selection.size());
    overallResult.setDefenders(overallDefTribes);
    overallResult.setDefenderAllies(overallDefAllies);

    for (Ally a : allyResults.keySet()) {
        AllyStatResult res = allyResults.get(a);
        res.setAlly(a);
        res.setOverallKills(overallResult.getKills());
        res.setOverallLosses(overallResult.getLosses());

        for (TribeStatResult tRes : res.getTribeStats()) {
            tRes.setOverallKills(res.getOverallKills());
            tRes.setOverallLosses(res.getOverallLosses());
            tRes.setAllyKills(res.getKills());
            tRes.setAllyLosses(res.getLosses());
        }
    }

    try {
        List<OverallStatResult> list = Arrays.asList(overallResult);
        overallResultCodes = new OverallReportStatsFormatter().formatElements(list, true);
        jOverallStatsArea.setText("<html><head>" + BBCodeFormatter.getStyles() + "</head><body>"
                + BBCodeFormatter.toHtml(overallResultCodes) + "</body></html>");
    } catch (Exception e) {
        overallResultCodes = null;
        jOverallStatsArea.setText("<html>Fehler bei der Darstellung der Auswertung</html>");
        logger.error("Failed to render overall BB representation", e);
    }
    try {
        List<AllyStatResult> list = new LinkedList<>();
        CollectionUtils.addAll(list, allyResults.values());
        allyResultCodes = new AllyReportStatsFormatter().formatElements(list, true);
        jAllyStatsArea.setText("<html><head>" + BBCodeFormatter.getStyles() + "</head><body>"
                + BBCodeFormatter.toHtml(allyResultCodes) + "</body></html>");
    } catch (Exception e) {
        allyResultCodes = null;
        jAllyStatsArea.setText("<html>Fehler bei der Darstellung der Auswertung</html>");
        logger.error("Failed to render BB representation for allies", e);
    }

    try {
        List<TribeStatResult> list = new LinkedList<>();
        for (AllyStatResult allyStat : allyResults.values()) {
            Collections.addAll(list,
                    allyStat.getTribeStats().toArray(new TribeStatResult[allyStat.getTribeStats().size()]));
        }
        tribeResultCodes = new TribeReportStatsFormatter().formatElements(list, true);
        jTribeStatsArea.setText("<html><head>" + BBCodeFormatter.getStyles() + "</head><body>"
                + BBCodeFormatter.toHtml(tribeResultCodes) + "</body></html>");
    } catch (Exception e) {
        tribeResultCodes = null;
        jTribeStatsArea.setText("<html>Fehler bei der Darstellung der Auswertung</html>");
        logger.error("Failed to render BB representation for tribes", e);
    }
    jResultTabbedPane.setSelectedIndex(0);
}
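
A side note on the two copies of allyResults.values() above: both the CollectionUtils.addAll call and the toArray round-trip just copy the value view into a List, which the plain JDK collection constructors already do. A minimal sketch (placeholder value type, since the real result classes are not needed to show the point):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;

public class CopyValuesDemo {
    public static void main(String[] args) {
        HashMap<String, Integer> allyResults = new HashMap<>();
        allyResults.put("ally1", 42);
        allyResults.put("ally2", 7);

        // Same effect as CollectionUtils.addAll(list, allyResults.values()):
        List<Integer> list = new LinkedList<>(allyResults.values());

        // Same effect as the Collections.addAll(list, coll.toArray(...)) round-trip:
        List<Integer> flat = new ArrayList<>(allyResults.values());

        System.out.println(list.size() + " " + flat.size()); // 2 2
    }
}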

From source file:org.redbus.arrivaltime.ArrivalTimeHelper.java

private static void getBusTimesResponse(BusDataRequest request) {
    if (request.content == null) {
        try {
            request.callback.onAsyncGetBusTimesError(request.requestId, BUSSTATUS_HTTPERROR,
                    "A network error occurred");
        } catch (Throwable t) {
        }
        return;
    }
    if (request.content.toLowerCase().contains("doesn't exist")) {
        try {
            request.callback.onAsyncGetBusTimesError(request.requestId, BUSSTATUS_BADSTOPCODE,
                    "The BusStop code was invalid");
        } catch (Throwable t) {
        }
        return;
    }
    HashMap<String, ArrivalTime> wasDiverted = new HashMap<String, ArrivalTime>();
    HashMap<String, ArrivalTime> hasTime = new HashMap<String, ArrivalTime>();
    HashMap<String, Boolean> validServices = new HashMap<String, Boolean>();
    ArrayList<ArrivalTime> allBusTimes = new ArrayList<ArrivalTime>();
    ArrayList<ArrivalTime> validBusTimes = new ArrayList<ArrivalTime>();
    try {
        XmlPullParser parser = Xml.newPullParser();
        parser.setInput(new StringReader(request.content));
        boolean inBusStopServiceSelector = false;
        boolean grabValidService = false;
        while (parser.next() != XmlPullParser.END_DOCUMENT) {
            String tagName = parser.getName();
            switch (parser.getEventType()) {
            case XmlPullParser.START_TAG:
                if (tagName.equals("tr")) {
                    String classAttr = "";
                    String styleAttr = parser.getAttributeValue(null, "style");
                    if (styleAttr != null) {
                        if (styleAttr.contains("display") && styleAttr.contains("none"))
                            classAttr = "BADTIME";
                    } else {
                        classAttr = parser.getAttributeValue(null, "class");
                        if (classAttr == null)
                            continue;
                        if ((!classAttr.contains("tblanc")) && (!classAttr.contains("tgris")))
                            continue;
                        classAttr = classAttr.replace("tblanc", "");
                        classAttr = classAttr.replace("tgris", "");
                        classAttr = classAttr.trim().toLowerCase();
                    }

                    ArrivalTime bt = parseStopTime(parser, request.stopCode);
                    bt.cssClass = classAttr;
                    if (bt.isDiverted) {
                        if (wasDiverted.containsKey(bt.service.toLowerCase()))
                            continue;
                        wasDiverted.put(bt.service.toLowerCase(), bt);
                    } else {
                        hasTime.put(bt.service.toLowerCase(), bt);
                        allBusTimes.add(bt);
                    }
                } else if (tagName.equals("select")) {
                    String idAttr = parser.getAttributeValue(null, "id");
                    if (idAttr == null)
                        continue;
                    if (!idAttr.contains("busStopService"))
                        continue;

                    inBusStopServiceSelector = true;
                } else if (tagName.equals("option")) {
                    if (!inBusStopServiceSelector)
                        break;
                    grabValidService = true;
                }
                break;
            case XmlPullParser.TEXT:
                if (grabValidService) {
                    String serviceName = parser.getText().toLowerCase();
                    validServices.put(serviceName.substring(0, serviceName.indexOf(' ')), Boolean.TRUE);
                }
                grabValidService = false;
                break;
            case XmlPullParser.END_TAG:
                if (tagName.equals("select"))
                    inBusStopServiceSelector = false;
                break;
            }
        }

        // find the "bad" css class
        String badCssClass = "BADTIME";
        for (ArrivalTime at : allBusTimes) {
            if (!validServices.containsKey(at.service.toLowerCase())) {
                badCssClass = at.cssClass;
                break;
            }
        }

        // filter out bad times
        for (ArrivalTime at : allBusTimes) {
            if ((!validServices.containsKey(at.service.toLowerCase())) || at.cssClass.equals(badCssClass))
                continue;
            validBusTimes.add(at);
        }

        // and add in any diverted services
        for (ArrivalTime at : wasDiverted.values()) {
            if (!validServices.containsKey(at.service.toLowerCase()))
                continue;
            if (hasTime.containsKey(at.service.toLowerCase()))
                continue;

            validBusTimes.add(at);
        }

    } catch (Throwable t) {
        Log.e("BusDataHelper.GetBusTimesResponse", request.content, t);
        try {
            request.callback.onAsyncGetBusTimesError(request.requestId, BUSSTATUS_BADDATA,
                    "Invalid data was received from the bus website");
        } catch (Throwable t2) {
        }
        return;
    }

    try {
        request.callback.onAsyncGetBusTimesSuccess(request.requestId, validBusTimes);
    } catch (Throwable t) {
    }
}

From source file:DIA_Umpire_Quant.DIA_Umpire_Quant.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws FileNotFoundException, IOException, Exception {
    System.out.println(
            "=================================================================================================");
    System.out.println("DIA-Umpire quantitation with targeted re-extraction analysis (version: "
            + UmpireInfo.GetInstance().Version + ")");
    if (args.length != 1) {
        System.out.println(
                "command format error, it should be like: java -jar -Xmx10G DIA_Umpire_Quant.jar diaumpire_quant.params");
        return;
    }
    try {
        ConsoleLogger.SetConsoleLogger(Level.INFO);
        ConsoleLogger.SetFileLogger(Level.DEBUG, FilenameUtils.getFullPath(args[0]) + "diaumpire_quant.log");
    } catch (Exception e) {
    }

    try {

        Logger.getRootLogger().info("Version: " + UmpireInfo.GetInstance().Version);
        Logger.getRootLogger().info("Parameter file:" + args[0]);

        BufferedReader reader = new BufferedReader(new FileReader(args[0]));
        String line = "";
        String WorkFolder = "";
        int NoCPUs = 2;

        String UserMod = "";
        String Combined_Prot = "";
        String InternalLibID = "";
        String ExternalLibPath = "";
        String ExternalLibDecoyTag = "DECOY";
        boolean DefaultProtFiltering = true;
        boolean DataSetLevelPepFDR = false;
        float ProbThreshold = 0.99f;
        float ExtProbThreshold = 0.99f;
        float Freq = 0f;
        int TopNPep = 6;
        int TopNFrag = 6;
        float MinFragMz = 200f;
        String FilterWeight = "GW";
        float MinWeight = 0.9f;
        float RTWindow_Int = -1f;
        float RTWindow_Ext = -1f;

        TandemParam tandemPara = new TandemParam(DBSearchParam.SearchInstrumentType.TOF5600);
        HashMap<String, File> AssignFiles = new HashMap<>();
        boolean InternalLibSearch = false;
        boolean ExternalLibSearch = false;

        boolean ExportSaint = false;
        boolean SAINT_MS1 = false;
        boolean SAINT_MS2 = true;

        HashMap<String, String[]> BaitList = new HashMap<>();
        HashMap<String, String> BaitName = new HashMap<>();
        HashMap<String, String[]> ControlList = new HashMap<>();
        HashMap<String, String> ControlName = new HashMap<>();

        //<editor-fold defaultstate="collapsed" desc="Reading parameter file">
        while ((line = reader.readLine()) != null) {
            line = line.trim();
            Logger.getRootLogger().info(line);
            if (!"".equals(line) && !line.startsWith("#")) {
                //System.out.println(line);
                if (line.equals("==File list begin")) {
                    do {
                        line = reader.readLine();
                        line = line.trim();
                        if (line.equals("==File list end")) {
                            continue;
                        } else if (!"".equals(line)) {
                            File newfile = new File(line);
                            if (newfile.exists()) {
                                AssignFiles.put(newfile.getAbsolutePath(), newfile);
                            } else {
                                Logger.getRootLogger().info("File: " + newfile + " does not exist.");
                            }
                        }
                    } while (!line.equals("==File list end"));
                }
                if (line.split("=").length < 2) {
                    continue;
                }
                String type = line.split("=")[0].trim();
                String value = line.split("=")[1].trim();
                switch (type) {
                case "TargetedExtraction": {
                    InternalLibSearch = Boolean.parseBoolean(value);
                    break;
                }
                case "InternalLibSearch": {
                    InternalLibSearch = Boolean.parseBoolean(value);
                    break;
                }
                case "ExternalLibSearch": {
                    ExternalLibSearch = Boolean.parseBoolean(value);
                    break;
                }

                case "Path": {
                    WorkFolder = value;
                    break;
                }
                case "path": {
                    WorkFolder = value;
                    break;
                }
                case "Thread": {
                    NoCPUs = Integer.parseInt(value);
                    break;
                }
                case "Fasta": {
                    tandemPara.FastaPath = value;
                    break;
                }
                case "Combined_Prot": {
                    Combined_Prot = value;
                    break;
                }
                case "DefaultProtFiltering": {
                    DefaultProtFiltering = Boolean.parseBoolean(value);
                    break;
                }
                case "DecoyPrefix": {
                    if (!"".equals(value)) {
                        tandemPara.DecoyPrefix = value;
                    }
                    break;
                }
                case "UserMod": {
                    UserMod = value;
                    break;
                }
                case "ProteinFDR": {
                    tandemPara.ProtFDR = Float.parseFloat(value);
                    break;
                }
                case "PeptideFDR": {
                    tandemPara.PepFDR = Float.parseFloat(value);
                    break;
                }
                case "DataSetLevelPepFDR": {
                    DataSetLevelPepFDR = Boolean.parseBoolean(value);
                    break;
                }
                case "InternalLibID": {
                    InternalLibID = value;
                    break;
                }
                case "ExternalLibPath": {
                    ExternalLibPath = value;
                    break;
                }
                case "ExtProbThreshold": {
                    ExtProbThreshold = Float.parseFloat(value);
                    break;
                }
                case "RTWindow_Int": {
                    RTWindow_Int = Float.parseFloat(value);
                    break;
                }
                case "RTWindow_Ext": {
                    RTWindow_Ext = Float.parseFloat(value);
                    break;
                }
                case "ExternalLibDecoyTag": {
                    ExternalLibDecoyTag = value;
                    if (ExternalLibDecoyTag.endsWith("_")) {
                        ExternalLibDecoyTag = ExternalLibDecoyTag.substring(0,
                                ExternalLibDecoyTag.length() - 1);
                    }
                    break;
                }
                case "ProbThreshold": {
                    ProbThreshold = Float.parseFloat(value);
                    break;
                }
                case "ReSearchProb": {
                    //ReSearchProb = Float.parseFloat(value);
                    break;
                }
                case "FilterWeight": {
                    FilterWeight = value;
                    break;
                }
                case "MinWeight": {
                    MinWeight = Float.parseFloat(value);
                    break;
                }
                case "TopNFrag": {
                    TopNFrag = Integer.parseInt(value);
                    break;
                }
                case "TopNPep": {
                    TopNPep = Integer.parseInt(value);
                    break;
                }
                case "Freq": {
                    Freq = Float.parseFloat(value);
                    break;
                }
                case "MinFragMz": {
                    MinFragMz = Float.parseFloat(value);
                    break;
                }

                //<editor-fold defaultstate="collapsed" desc="SaintOutput">
                case "ExportSaintInput": {
                    ExportSaint = Boolean.parseBoolean(value);
                    break;
                }
                case "QuantitationType": {
                    switch (value) {
                    case "MS1": {
                        SAINT_MS1 = true;
                        SAINT_MS2 = false;
                        break;
                    }
                    case "MS2": {
                        SAINT_MS1 = false;
                        SAINT_MS2 = true;
                        break;
                    }
                    case "BOTH": {
                        SAINT_MS1 = true;
                        SAINT_MS2 = true;
                        break;
                    }
                    }
                    break;
                }
                //                    case "BaitInputFile": {
                //                        SaintBaitFile = value;
                //                        break;
                //                    }
                //                    case "PreyInputFile": {
                //                        SaintPreyFile = value;
                //                        break;
                //                    }
                //                    case "InterationInputFile": {
                //                        SaintInteractionFile = value;
                //                        break;
                //                    }
                default: {
                    if (type.startsWith("BaitName_")) {
                        BaitName.put(type.substring(9), value);
                    }
                    if (type.startsWith("BaitFile_")) {
                        BaitList.put(type.substring(9), value.split("\t"));
                    }
                    if (type.startsWith("ControlName_")) {
                        ControlName.put(type.substring(12), value);
                    }
                    if (type.startsWith("ControlFile_")) {
                        ControlList.put(type.substring(12), value.split("\t"));
                    }
                    break;
                }
                //</editor-fold>                    
                }
            }
        }
        //</editor-fold>

        //Initialize PTM manager using compomics library
        PTMManager.GetInstance();
        if (!UserMod.equals("")) {
            PTMManager.GetInstance().ImportUserMod(UserMod);
        }

        //Check if the fasta file can be found
        if (!new File(tandemPara.FastaPath).exists()) {
            Logger.getRootLogger().info("Fasta file :" + tandemPara.FastaPath
                    + " cannot be found, the process will be terminated, please check.");
            System.exit(1);
        }

        //Check if the prot.xml file can be found
        if (!new File(Combined_Prot).exists()) {
            Logger.getRootLogger().info("ProtXML file: " + Combined_Prot
                    + " cannot be found, the export protein summary table will be empty.");
        }

        LCMSID protID = null;

        //Parse prot.xml and generate protein master list given an FDR 
        if (Combined_Prot != null && !Combined_Prot.equals("")) {
            protID = LCMSID.ReadLCMSIDSerialization(Combined_Prot);
            if (!"".equals(Combined_Prot) && protID == null) {
                protID = new LCMSID(Combined_Prot, tandemPara.DecoyPrefix, tandemPara.FastaPath);
                ProtXMLParser protxmlparser = new ProtXMLParser(protID, Combined_Prot, 0f);
                //Use DIA-Umpire default protein FDR calculation
                if (DefaultProtFiltering) {
                    protID.RemoveLowLocalPWProtein(0.8f);
                    protID.RemoveLowMaxIniProbProtein(0.9f);
                    protID.FilterByProteinDecoyFDRUsingMaxIniProb(tandemPara.DecoyPrefix, tandemPara.ProtFDR);
                } //Get protein FDR calculation without other filtering
                else {
                    protID.FilterByProteinDecoyFDRUsingLocalPW(tandemPara.DecoyPrefix, tandemPara.ProtFDR);
                }
                protID.LoadSequence();
                protID.WriteLCMSIDSerialization(Combined_Prot);
            }
            Logger.getRootLogger().info("Protein No.:" + protID.ProteinList.size());
        }
        HashMap<String, HashMap<String, FragmentPeak>> IDSummaryFragments = new HashMap<>();

        //Generate DIA file list
        ArrayList<DIAPack> FileList = new ArrayList<>();

        File folder = new File(WorkFolder);
        if (!folder.exists()) {
            Logger.getRootLogger().info("The path : " + WorkFolder + " cannot be found.");
            System.exit(1);
        }
        for (final File fileEntry : folder.listFiles()) {
            if (fileEntry.isFile()
                    && (fileEntry.getAbsolutePath().toLowerCase().endsWith(".mzxml")
                            || fileEntry.getAbsolutePath().toLowerCase().endsWith(".mzml"))
                    && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q1.mzxml")
                    && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q2.mzxml")
                    && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q3.mzxml")) {
                AssignFiles.put(fileEntry.getAbsolutePath(), fileEntry);
            }
            if (fileEntry.isDirectory()) {
                for (final File fileEntry2 : fileEntry.listFiles()) {
                    if (fileEntry2.isFile()
                            && (fileEntry2.getAbsolutePath().toLowerCase().endsWith(".mzxml")
                                    || fileEntry2.getAbsolutePath().toLowerCase().endsWith(".mzml"))
                            && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q1.mzxml")
                            && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q2.mzxml")
                            && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q3.mzxml")) {
                        AssignFiles.put(fileEntry2.getAbsolutePath(), fileEntry2);
                    }
                }
            }
        }

        Logger.getRootLogger().info("No. of files assigned :" + AssignFiles.size());
        for (File fileEntry : AssignFiles.values()) {
            Logger.getRootLogger().info(fileEntry.getAbsolutePath());
            String mzXMLFile = fileEntry.getAbsolutePath();
            if (mzXMLFile.toLowerCase().endsWith(".mzxml") || mzXMLFile.toLowerCase().endsWith(".mzml")) {
                DIAPack DiaFile = new DIAPack(mzXMLFile, NoCPUs);
                FileList.add(DiaFile);
                HashMap<String, FragmentPeak> FragMap = new HashMap<>();
                IDSummaryFragments.put(FilenameUtils.getBaseName(mzXMLFile), FragMap);
                Logger.getRootLogger().info(
                        "=================================================================================================");
                Logger.getRootLogger().info("Processing " + mzXMLFile);
                if (!DiaFile.LoadDIASetting()) {
                    Logger.getRootLogger().info("Loading DIA setting failed, job is incomplete");
                    System.exit(1);
                }
                if (!DiaFile.LoadParams()) {
                    Logger.getRootLogger().info("Loading parameters failed, job is incomplete");
                    System.exit(1);
                }
            }
        }

        LCMSID combinePepID = null;
        if (DataSetLevelPepFDR) {
            combinePepID = LCMSID.ReadLCMSIDSerialization(WorkFolder + "combinePepID.SerFS");
            if (combinePepID == null) {
                FDR_DataSetLevel fdr = new FDR_DataSetLevel();
                fdr.GeneratePepIonList(FileList, tandemPara, WorkFolder + "combinePepID.SerFS");
                combinePepID = fdr.combineID;
                combinePepID.WriteLCMSIDSerialization(WorkFolder + "combinePepID.SerFS");
            }
        }

        //process each DIA file for quantification based on untargeted identifications
        for (DIAPack DiaFile : FileList) {
            long time = System.currentTimeMillis();
            Logger.getRootLogger().info("Loading identification results " + DiaFile.Filename + "....");

            //If the LCMSID serialization is found
            if (!DiaFile.ReadSerializedLCMSID()) {
                DiaFile.ParsePepXML(tandemPara, combinePepID);
                DiaFile.BuildStructure();
                if (!DiaFile.MS1FeatureMap.ReadPeakCluster()) {
                    Logger.getRootLogger().info("Loading peak and structure failed, job is incomplete");
                    System.exit(1);
                }
                DiaFile.MS1FeatureMap.ClearMonoisotopicPeakOfCluster();
                //Generate mapping between index of precursor feature and pseudo MS/MS scan index 
                DiaFile.GenerateClusterScanNomapping();
                //Doing quantification
                DiaFile.AssignQuant();
                DiaFile.ClearStructure();
            }
            DiaFile.IDsummary.ReduceMemoryUsage();
            time = System.currentTimeMillis() - time;
            Logger.getRootLogger().info(DiaFile.Filename + " processed time:"
                    + String.format("%d hour, %d min, %d sec", TimeUnit.MILLISECONDS.toHours(time),
                            TimeUnit.MILLISECONDS.toMinutes(time)
                                    - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(time)),
                            TimeUnit.MILLISECONDS.toSeconds(time)
                                    - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(time))));
        }

        //<editor-fold defaultstate="collapsed" desc="Targeted re-extraction using internal library">
        Logger.getRootLogger().info(
                "=================================================================================================");
        if (InternalLibSearch && FileList.size() > 1) {
            Logger.getRootLogger().info("Module C: Targeted extraction using internal library");

            FragmentLibManager libManager = FragmentLibManager.ReadFragmentLibSerialization(WorkFolder,
                    InternalLibID);
            if (libManager == null) {
                Logger.getRootLogger().info("Building internal spectral library");
                libManager = new FragmentLibManager(InternalLibID);
                ArrayList<LCMSID> LCMSIDList = new ArrayList<>();
                for (DIAPack dia : FileList) {
                    LCMSIDList.add(dia.IDsummary);
                }
                libManager.ImportFragLibTopFrag(LCMSIDList, Freq, TopNFrag);
                libManager.WriteFragmentLibSerialization(WorkFolder);
            }
            libManager.ReduceMemoryUsage();

            Logger.getRootLogger()
                    .info("Building retention time prediction model and generate candidate peptide list");
            for (int i = 0; i < FileList.size(); i++) {
                FileList.get(i).IDsummary.ClearMappedPep();
            }
            for (int i = 0; i < FileList.size(); i++) {
                for (int j = i + 1; j < FileList.size(); j++) {
                    RTAlignedPepIonMapping alignment = new RTAlignedPepIonMapping(WorkFolder,
                            FileList.get(i).GetParameter(), FileList.get(i).IDsummary,
                            FileList.get(j).IDsummary);
                    alignment.GenerateModel();
                    alignment.GenerateMappedPepIon();
                }
                FileList.get(i).ExportID();
                FileList.get(i).IDsummary = null;
            }

            Logger.getRootLogger().info("Targeted matching........");
            for (DIAPack diafile : FileList) {
                if (diafile.IDsummary == null) {
                    diafile.ReadSerializedLCMSID();
                }
                if (!diafile.IDsummary.GetMappedPepIonList().isEmpty()) {
                    diafile.UseMappedIon = true;
                    diafile.FilterMappedIonByProb = false;
                    diafile.BuildStructure();
                    diafile.MS1FeatureMap.ReadPeakCluster();
                    diafile.MS1FeatureMap.ClearMonoisotopicPeakOfCluster();
                    diafile.GenerateMassCalibrationRTMap();
                    diafile.TargetedExtractionQuant(false, libManager, 1.1f, RTWindow_Int);
                    diafile.MS1FeatureMap.ClearAllPeaks();
                    diafile.IDsummary.ReduceMemoryUsage();
                    diafile.IDsummary.RemoveLowProbMappedIon(ProbThreshold);
                    diafile.ExportID();
                    Logger.getRootLogger().info("Peptide ions: " + diafile.IDsummary.GetPepIonList().size()
                            + " Mapped ions: " + diafile.IDsummary.GetMappedPepIonList().size());
                    diafile.ClearStructure();
                }
                diafile.IDsummary = null;
                System.gc();
            }
            Logger.getRootLogger().info(
                    "=================================================================================================");
        }
        //</editor-fold>

        //<editor-fold defaultstate="collapsed" desc="Targeted re-extraction using external library">
        //External library search
        if (ExternalLibSearch) {
            Logger.getRootLogger().info("Module C: Targeted extraction using external library");

            //Read external library
            FragmentLibManager ExlibManager = FragmentLibManager.ReadFragmentLibSerialization(WorkFolder,
                    FilenameUtils.getBaseName(ExternalLibPath));
            if (ExlibManager == null) {
                ExlibManager = new FragmentLibManager(FilenameUtils.getBaseName(ExternalLibPath));

                //Import traML file
                ExlibManager.ImportFragLibByTraML(ExternalLibPath, ExternalLibDecoyTag);
                //Check if there are decoy spectra
                ExlibManager.CheckDecoys();
                //ExlibManager.ImportFragLibBySPTXT(ExternalLibPath);
                ExlibManager.WriteFragmentLibSerialization(WorkFolder);
            }
            Logger.getRootLogger()
                    .info("No. of peptide ions in external lib:" + ExlibManager.PeptideFragmentLib.size());
            for (DIAPack diafile : FileList) {
                if (diafile.IDsummary == null) {
                    diafile.ReadSerializedLCMSID();
                }
                //Generate RT mapping
                RTMappingExtLib RTmap = new RTMappingExtLib(diafile.IDsummary, ExlibManager,
                        diafile.GetParameter());
                RTmap.GenerateModel();
                RTmap.GenerateMappedPepIon();

                diafile.BuildStructure();
                diafile.MS1FeatureMap.ReadPeakCluster();
                diafile.GenerateMassCalibrationRTMap();
                //Perform targeted re-extraction
                diafile.TargetedExtractionQuant(false, ExlibManager, ProbThreshold, RTWindow_Ext);
                diafile.MS1FeatureMap.ClearAllPeaks();
                diafile.IDsummary.ReduceMemoryUsage();
                //Remove target IDs below the defined probability threshold
                diafile.IDsummary.RemoveLowProbMappedIon(ExtProbThreshold);
                diafile.ExportID();
                diafile.ClearStructure();
                Logger.getRootLogger().info("Peptide ions: " + diafile.IDsummary.GetPepIonList().size()
                        + " Mapped ions: " + diafile.IDsummary.GetMappedPepIonList().size());
            }
        }
        //</editor-fold>

        //<editor-fold defaultstate="collapsed" desc="Peptide and fragment selection">
        Logger.getRootLogger().info("Peptide and fragment selection across the whole dataset");
        ArrayList<LCMSID> SummaryList = new ArrayList<>();
        for (DIAPack diafile : FileList) {
            if (diafile.IDsummary == null) {
                diafile.ReadSerializedLCMSID();
                diafile.IDsummary.ClearAssignPeakCluster();
                //diafile.IDsummary.ClearPSMs();                    
            }
            if (protID != null) {
                //Generate protein list according to mapping of peptide ions for each DIA file to the master protein list
                diafile.IDsummary.GenerateProteinByRefIDByPepSeq(protID, true);
                diafile.IDsummary.ReMapProPep();
            }
            if ("GW".equals(FilterWeight)) {
                diafile.IDsummary.SetFilterByGroupWeight();
            } else if ("PepW".equals(FilterWeight)) {
                diafile.IDsummary.SetFilterByWeight();
            }
            SummaryList.add(diafile.IDsummary);
        }
        FragmentSelection fragselection = new FragmentSelection(SummaryList);
        fragselection.freqPercent = Freq;
        fragselection.MinFragMZ = MinFragMz;
        fragselection.GeneratePepFragScoreMap();
        fragselection.GenerateTopFragMap(TopNFrag);
        fragselection.GenerateProtPepScoreMap(MinWeight);
        fragselection.GenerateTopPepMap(TopNPep);
        //</editor-fold>

        //<editor-fold defaultstate="collapsed" desc="Writing general reports">                 
        ExportTable export = new ExportTable(WorkFolder, SummaryList, IDSummaryFragments, protID,
                fragselection);
        export.Export(TopNPep, TopNFrag, Freq);
        //</editor-fold>

        //<editor-fold defaultstate="collapsed" desc="Generate SAINT input files">
        if (ExportSaint && protID != null) {
            HashMap<String, DIAPack> Filemap = new HashMap<>();
            for (DIAPack DIAfile : FileList) {
                Filemap.put(DIAfile.GetBaseName(), DIAfile);
            }

            FileWriter baitfile = new FileWriter(WorkFolder + "SAINT_Bait_" + DateTimeTag.GetTag() + ".txt");
            FileWriter preyfile = new FileWriter(WorkFolder + "SAINT_Prey_" + DateTimeTag.GetTag() + ".txt");
            FileWriter interactionfileMS1 = null;
            FileWriter interactionfileMS2 = null;
            if (SAINT_MS1) {
                interactionfileMS1 = new FileWriter(
                        WorkFolder + "SAINT_Interaction_MS1_" + DateTimeTag.GetTag() + ".txt");
            }
            if (SAINT_MS2) {
                interactionfileMS2 = new FileWriter(
                        WorkFolder + "SAINT_Interaction_MS2_" + DateTimeTag.GetTag() + ".txt");
            }
            HashMap<String, String> PreyID = new HashMap<>();

            for (String samplekey : ControlName.keySet()) {
                String name = ControlName.get(samplekey);
                for (String file : ControlList.get(samplekey)) {
                    baitfile.write(FilenameUtils.getBaseName(file) + "\t" + name + "\t" + "C\n");
                    LCMSID IDsummary = Filemap.get(FilenameUtils.getBaseName(file)).IDsummary;
                    if (SAINT_MS1) {
                        SaintOutput(protID, IDsummary, fragselection, interactionfileMS1, file, name, PreyID,
                                1);
                    }
                    if (SAINT_MS2) {
                        SaintOutput(protID, IDsummary, fragselection, interactionfileMS2, file, name, PreyID,
                                2);
                    }
                }
            }
            for (String samplekey : BaitName.keySet()) {
                String name = BaitName.get(samplekey);
                for (String file : BaitList.get(samplekey)) {
                    baitfile.write(FilenameUtils.getBaseName(file) + "\t" + name + "\t" + "T\n");
                    LCMSID IDsummary = Filemap.get(FilenameUtils.getBaseName(file)).IDsummary;
                    if (SAINT_MS1) {
                        SaintOutput(protID, IDsummary, fragselection, interactionfileMS1, file, name, PreyID,
                                1);
                    }
                    if (SAINT_MS2) {
                        SaintOutput(protID, IDsummary, fragselection, interactionfileMS2, file, name, PreyID,
                                2);
                    }
                }
            }
            baitfile.close();
            if (SAINT_MS1) {
                interactionfileMS1.close();
            }
            if (SAINT_MS2) {
                interactionfileMS2.close();
            }
            for (String AccNo : PreyID.keySet()) {
                preyfile.write(AccNo + "\t" + PreyID.get(AccNo) + "\n");
            }
            preyfile.close();
        }

        //</editor-fold>

        Logger.getRootLogger().info("Job done");
        Logger.getRootLogger().info(
                "=================================================================================================");

    } catch (Exception e) {
        Logger.getRootLogger().error(ExceptionUtils.getStackTrace(e));
        throw e;
    }
}

From source file:com.ptts.sync.SyncAdapter.java

/**
 * Read JSON from an input stream, storing it into the content provider.
 *
 * <p>This is where incoming data is persisted, committing the results of a sync. In order to
 * minimize (expensive) disk operations, we compare incoming data with what's already in our
 * database, and compute a merge. Only changes (insert/update/delete) will result in a database
 * write.
 *
 * <p>As an additional optimization, we use a batch operation to perform all database writes at
 * once.
 *
 * <p>Merge strategy:
 * 1. Get cursor to all items in feed<br/>
 * 2. For each item, check if it's in the incoming data.<br/>
 *    a. YES: Remove from "incoming" list. Check if data has mutated, if so, perform
 *            database UPDATE.<br/>
 *    b. NO: Schedule DELETE from database.<br/>
 * (At this point, incoming database only contains missing items.)<br/>
 * 3. For any items remaining in incoming list, ADD to database.
 */
public void updateLocalFeedData(final InputStream stream, final SyncResult syncResult)
        throws IOException, JSONException, RemoteException, OperationApplicationException, ParseException {
    final FeedParserJson feedParser = new FeedParserJson();
    final ContentResolver contentResolver = getContext().getContentResolver();

    Log.i(TAG, "Parsing stream as Json feed");
    final List<FeedParserJson.Entry> entries = feedParser.parse(stream);
    Log.i(TAG, "Parsing complete. Found " + entries.size() + " entries");

    ArrayList<ContentProviderOperation> batch = new ArrayList<ContentProviderOperation>();

    // Build hash table of incoming entries
    HashMap<String, FeedParserJson.Entry> entryMap = new HashMap<String, FeedParserJson.Entry>();
    for (FeedParserJson.Entry e : entries) {
        entryMap.put(e.id, e);
    }

    // Get list of all items
    Log.i(TAG, "Fetching local entries for merge");
    Uri uri = FeedContract.Entry.CONTENT_URI; // Get all entries
    Cursor c = contentResolver.query(uri, PROJECTION, null, null, null);
    assert c != null;
    Log.i(TAG, "Found " + c.getCount() + " local entries. Computing merge solution...");

    // Find stale data
    int id;
    String entryId;
    String name;
    String start;
    String end;
    String stops;
    while (c.moveToNext()) {
        syncResult.stats.numEntries++;
        id = c.getInt(COLUMN_ID);
        entryId = c.getString(COLUMN_ENTRY_ID);
        name = c.getString(COLUMN_NAME);
        start = c.getString(COLUMN_START);
        end = c.getString(COLUMN_END);
        stops = c.getString(COLUMN_STOPS);

        Log.i("STOPS FROM PROJECTION", stops);

        FeedParserJson.Entry match = entryMap.get(entryId);
        if (match != null) {
            // Entry exists. Remove from entry map to prevent insert later.
            entryMap.remove(entryId);
            // Check to see if the entry needs to be updated
            Uri existingUri = FeedContract.Entry.CONTENT_URI.buildUpon().appendPath(Integer.toString(id))
                    .build();

            if ((match.name != null && !match.name.equals(name))
                    || (match.start != null && !match.start.equals(start))
                    || (match.stops != null && !match.stops.equals(stops))
                    || (match.end != null && !match.end.equals(end))) {

                Log.i("STOPS FROM HASHMAP", match.stops);
                if (!match.stops.equals(stops)) {
                    Log.i("COMPARING PROJECTION " + match.stops + " & HASHMAP " + stops,
                            "The two aren't equal");
                } else {
                    Log.i("COMPARING PROJECTION & HASHMAP", "The two are equal");
                }

                // Update existing record

                Log.i(TAG, "Scheduling update: " + existingUri);
                batch.add(ContentProviderOperation.newUpdate(existingUri)
                        .withValue(FeedContract.Entry.COLUMN_NAME_ENTRY_ID, entryId)
                        .withValue(FeedContract.Entry.COLUMN_NAME_NAME, name)
                        .withValue(FeedContract.Entry.COLUMN_NAME_START, start)
                        .withValue(FeedContract.Entry.COLUMN_NAME_END, end)
                        .withValue(FeedContract.Entry.COLUMN_NAME_STOPS, stops).build());
                syncResult.stats.numUpdates++;
            } else {
                Log.i(TAG, "No action: " + existingUri);
            }
        } else {
            // Entry doesn't exist. Remove it from the database.
            Uri deleteUri = FeedContract.Entry.CONTENT_URI.buildUpon().appendPath(Integer.toString(id)).build();
            Log.i(TAG, "Scheduling delete: " + deleteUri);
            batch.add(ContentProviderOperation.newDelete(deleteUri).build());
            syncResult.stats.numDeletes++;
        }
    }
    c.close();

    // Add new items
    for (FeedParserJson.Entry e : entryMap.values()) {
        Log.i(TAG, "Scheduling insert: entry_id=" + e.id);
        batch.add(ContentProviderOperation.newInsert(FeedContract.Entry.CONTENT_URI)
                .withValue(FeedContract.Entry.COLUMN_NAME_ENTRY_ID, e.id)
                .withValue(FeedContract.Entry.COLUMN_NAME_NAME, e.name)
                .withValue(FeedContract.Entry.COLUMN_NAME_START, e.start)
                .withValue(FeedContract.Entry.COLUMN_NAME_END, e.end)
                .withValue(FeedContract.Entry.COLUMN_NAME_STOPS, e.stops).build());
        syncResult.stats.numInserts++;
    }
    Log.i(TAG, "Merge solution ready. Applying batch update");
    mContentResolver.applyBatch(FeedContract.CONTENT_AUTHORITY, batch);
    mContentResolver.notifyChange(FeedContract.Entry.CONTENT_URI, // URI where data was modified
            null, // No local observer
            false); // IMPORTANT: Do not sync to network
    // This sample doesn't support uploads, but if *your* code does, make sure you set
    // syncToNetwork=false in the line above to prevent duplicate syncs.
}
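
The merge strategy documented above is independent of the content-provider plumbing: index incoming records by id in a HashMap, consume matches while scanning the existing rows, and whatever is left in values() is the set of inserts. A minimal sketch of just that pattern (the Entry record and field names are illustrative; Java 16+ for the record syntax):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

public class MergeSketch {
    record Entry(String id, String name) {}

    // Fills toDelete/toUpdate from the existing rows and returns the inserts.
    static List<Entry> merge(List<Entry> incoming, List<Entry> existing,
            List<Entry> toDelete, List<Entry> toUpdate) {
        // 1. Build a hash table of incoming entries keyed by id.
        HashMap<String, Entry> incomingById = new HashMap<>();
        for (Entry e : incoming) {
            incomingById.put(e.id(), e);
        }
        // 2. Scan existing rows: a match is removed from the table (and updated
        //    if mutated); an unmatched row is scheduled for deletion.
        for (Entry row : existing) {
            Entry match = incomingById.remove(row.id());
            if (match == null) {
                toDelete.add(row);
            } else if (!match.name().equals(row.name())) {
                toUpdate.add(match);
            }
        }
        // 3. Entries still in the table exist only in the feed: values() is
        //    exactly the set of rows to insert.
        return new ArrayList<>(incomingById.values());
    }
}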

From source file:es.upm.dit.gsi.barmas.dataset.utils.DatasetSplitter.java

/**
 * This method splits the original dataset in many small datasets for a
 * given number of agents.
 * 
 * @param ratio
 *            0 < ratio < 1 -> Normally, 0.3 or 0.4 to build a test dataset
 *            with this percentage of the original data.
 * @param agents
 *            number of agents to split the original dataset
 * @param originalDatasetPath
 * @param outputDir
 * @param central
 *            true to create a bayescentral dataset that joint all agent
 *            data
 * @param scenario
 * @param iteration
 * @throws Exception
 */
public void splitDataset(double ratio, int agents, String originalDatasetPath, String outputDir,
        boolean central, String scenario, Logger logger, int iteration) {

    int ratioint = (int) (ratio * 100);
    double roundedratio = ((double) ratioint) / 100;
    String outputDirWithRatio = outputDir + "/" + roundedratio + "testRatio/iteration-" + iteration;
    File dir = new File(outputDirWithRatio);
    if (!dir.exists() || !dir.isDirectory()) {
        dir.mkdirs();
    }

    logger.finer("--> splitDataset()");
    logger.fine("Creating experiment.info...");
    this.createExperimentInfoFile(ratio, agents, originalDatasetPath, outputDirWithRatio, central, scenario,
            logger);

    try {
        // Look for essentials
        List<String[]> essentials = this.getEssentials(originalDatasetPath, logger);

        HashMap<String, CsvWriter> writers = new HashMap<String, CsvWriter>();
        CsvReader csvreader = new CsvReader(new FileReader(new File(originalDatasetPath)));

        csvreader.readHeaders();
        String[] headers = csvreader.getHeaders();
        int originalDatasetRowsCounter = 0;
        while (csvreader.readRecord()) {
            originalDatasetRowsCounter++;
        }
        csvreader.close();

        // Create datasets files

        // Central dataset
        if (central) {
            String fileName = outputDirWithRatio + File.separator + "bayes-central-dataset.csv";
            CsvWriter writer = new CsvWriter(new FileWriter(fileName), ',');
            writer.writeRecord(headers);
            writers.put("CENTRAL", writer);
            for (String[] essential : essentials) {
                writer.writeRecord(essential);
            }
            logger.fine("Bayes central dataset created.");
        }

        // Agent datasets
        String agentsDatasetsDir = outputDirWithRatio + File.separator + agents + "agents";
        File f = new File(agentsDatasetsDir);
        if (!f.isDirectory()) {
            f.mkdirs();
        }
        for (int i = 0; i < agents; i++) {
            String fileName = agentsDatasetsDir + File.separator + "agent-" + i + "-dataset.csv";
            CsvWriter writer = new CsvWriter(new FileWriter(fileName), ',');
            writer.writeRecord(headers);
            for (String[] essential : essentials) {
                writer.writeRecord(essential);
            }
            writers.put("AGENT" + i, writer);
            logger.fine("AGENT" + i + " dataset created.");
        }

        // Test dataset
        String fileName = outputDirWithRatio + File.separator + "test-dataset.csv";
        CsvWriter writer = new CsvWriter(new FileWriter(fileName), ',');
        writer.writeRecord(headers);
        writers.put("TEST", writer);
        logger.fine("Test dataset created.");

        // Create an ordering queue
        int testCases = (int) (ratio * originalDatasetRowsCounter);
        int testStep = originalDatasetRowsCounter / testCases;

        csvreader = new CsvReader(new FileReader(new File(originalDatasetPath)));

        csvreader.readHeaders();
        int stepCounter = 0 - (iteration % testStep);
        int agentCounter = 0;
        while (csvreader.readRecord()) {
            String[] row = csvreader.getValues();
            if (stepCounter % testStep == 0) {
                writer = writers.get("TEST");
                writer.writeRecord(row);
            } else {
                writer = writers.get("AGENT" + agentCounter);
                writer.writeRecord(row);
                writer = writers.get("CENTRAL");
                writer.writeRecord(row);
                agentCounter++;
                if (agentCounter == agents) {
                    agentCounter = 0;
                }
            }
            stepCounter++;
        }

        csvreader.close();
        for (CsvWriter w : writers.values()) {
            w.close();
        }

    } catch (Exception e) {
        logger.severe("Exception while splitting dataset. ->");
        logger.severe(e.getMessage());
        System.exit(1);
    }

    logger.finer("<-- splitDataset()");
}

From source file:hydrograph.ui.graph.editor.RenameJobParticipant.java

@Override
public Change createChange(IProgressMonitor pm) throws CoreException, OperationCanceledException {
    final HashMap<IFile, RenameResourceChange> changes = new HashMap<IFile, RenameResourceChange>();
    final String newName = ResourceChangeUtil.removeExtension(getArguments().getNewName());

    if (modifiedResource.getParent() != null) {
        if (!StringUtils.equalsIgnoreCase(modifiedResource.getParent().getName(),
                CustomMessages.ProjectSupport_JOBS)) {
            List<IResource> memberList = new ArrayList<IResource>(modifiedResource.getProject()
                    .getFolder(modifiedResource.getParent().getName()).members().length);
            ResourceChangeUtil.addMembersToList(memberList,
                    modifiedResource.getProject().getFolder(modifiedResource.getParent().getName()));
            final String fileName = ResourceChangeUtil.removeExtension(modifiedResource.getName());
            for (IResource resource : memberList) {
                if (Pattern.matches(fileName + Constants.EXTENSION, resource.getName())) {
                    if ((StringUtils.equalsIgnoreCase(Messages.XML_EXT, resource.getFileExtension())
                            || StringUtils.equalsIgnoreCase(Messages.JOB_EXT, resource.getFileExtension()))
                            && !(StringUtils.equalsIgnoreCase(modifiedResource.getName(),
                                    resource.getName()))) {
                        getRenameChanges(changes, newName, resource);
                    }
                }
            }
        } else if (StringUtils.equalsIgnoreCase(modifiedResource.getParent().getName(),
                CustomMessages.ProjectSupport_JOBS)
                || StringUtils.equalsIgnoreCase(modifiedResource.getParent().getName(),
                        CustomMessages.ProjectSupport_PARAM)) {
            List<IResource> memberList = new ArrayList<IResource>(modifiedResource.getProject()
                    .getFolder(CustomMessages.ProjectSupport_PARAM).members().length
                    + modifiedResource.getProject().getFolder(CustomMessages.ProjectSupport_JOBS)
                            .members().length);
            ResourceChangeUtil.addMembersToList(memberList,
                    modifiedResource.getProject().getFolder(CustomMessages.ProjectSupport_JOBS));
            ResourceChangeUtil.addMembersToList(memberList,
                    modifiedResource.getProject().getFolder(CustomMessages.ProjectSupport_PARAM));
            final String fileName = ResourceChangeUtil.removeExtension(modifiedResource.getName());
            for (IResource resource : memberList) {
                if (Pattern.matches(fileName + Constants.EXTENSION, resource.getName())) {
                    if ((StringUtils.equalsIgnoreCase(Messages.XML_EXT, resource.getFileExtension())
                            || StringUtils.equalsIgnoreCase(Messages.PROPERTIES_EXT,
                                    resource.getFileExtension())
                            || StringUtils.equalsIgnoreCase(Messages.JOB_EXT, resource.getFileExtension()))
                            && !(StringUtils.equalsIgnoreCase(modifiedResource.getName(),
                                    resource.getName()))) {
                        getRenameChanges(changes, newName, resource);
                    }
                }
            }
        }
    }

    if (changes.isEmpty()) {
        return null;
    }

    CompositeChange result = new CompositeChange("Rename Job Related Files");
    for (RenameResourceChange change : changes.values()) {
        result.add(change);
    }
    return result;
}

From source file:org.apache.hadoop.hive.ql.exec.Utilities.java

/**
 * Remove all temporary files and duplicate (double-committed) files from a given directory.
 *
 * @return a list of path names corresponding to should-be-created empty buckets.
 */
public static List<Path> removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats,
        DynamicPartitionCtx dpCtx, FileSinkDesc conf, Configuration hconf, Set<Path> filesKept)
        throws IOException {
    if (fileStats == null) {
        return null;
    }

    List<Path> result = new ArrayList<Path>();
    HashMap<String, FileStatus> taskIDToFile = null;
    if (dpCtx != null) {
        FileStatus parts[] = fileStats;

        for (int i = 0; i < parts.length; ++i) {
            assert parts[i].isDir() : "dynamic partition " + parts[i].getPath() + " is not a directory";
            FileStatus[] items = fs.listStatus(parts[i].getPath());

            // remove empty directory since DP insert should not generate empty partitions.
            // empty directories could be generated by crashed Task/ScriptOperator
            if (items.length == 0) {
                if (!fs.delete(parts[i].getPath(), true)) {
                    LOG.error("Cannot delete empty directory " + parts[i].getPath());
                    throw new IOException("Cannot delete empty directory " + parts[i].getPath());
                }
            }

            taskIDToFile = removeTempOrDuplicateFiles(items, fs);
            if (filesKept != null && taskIDToFile != null) {
                addFilesToPathSet(taskIDToFile.values(), filesKept);
            }
            // if the table is bucketed and enforce bucketing, we should check and generate all buckets
            if (dpCtx.getNumBuckets() > 0 && taskIDToFile != null
                    && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) {
                // refresh the file list
                items = fs.listStatus(parts[i].getPath());
                // get the missing buckets and generate empty buckets
                String taskID1 = taskIDToFile.keySet().iterator().next();
                Path bucketPath = taskIDToFile.values().iterator().next().getPath();
                for (int j = 0; j < dpCtx.getNumBuckets(); ++j) {
                    String taskID2 = replaceTaskId(taskID1, j);
                    if (!taskIDToFile.containsKey(taskID2)) {
                        // create empty bucket, file name should be derived from taskID2
                        URI bucketUri = bucketPath.toUri();
                        String path2 = replaceTaskIdFromFilename(bucketUri.getPath().toString(), j);
                        result.add(new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2));
                    }
                }
            }
        }
    } else {
        FileStatus[] items = fileStats;
        if (items.length == 0) {
            return result;
        }
        taskIDToFile = removeTempOrDuplicateFiles(items, fs);
        if (filesKept != null && taskIDToFile != null) {
            addFilesToPathSet(taskIDToFile.values(), filesKept);
        }
        if (taskIDToFile != null && taskIDToFile.size() > 0 && conf != null && conf.getTable() != null
                && (conf.getTable().getNumBuckets() > taskIDToFile.size())
                && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) {
            // get the missing buckets and generate empty buckets for non-dynamic partition
            String taskID1 = taskIDToFile.keySet().iterator().next();
            Path bucketPath = taskIDToFile.values().iterator().next().getPath();
            for (int j = 0; j < conf.getTable().getNumBuckets(); ++j) {
                String taskID2 = replaceTaskId(taskID1, j);
                if (!taskIDToFile.containsKey(taskID2)) {
                    // create empty bucket, file name should be derived from taskID2
                    URI bucketUri = bucketPath.toUri();
                    String path2 = replaceTaskIdFromFilename(bucketUri.getPath().toString(), j);
                    result.add(new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2));
                }
            }
        }
    }

    return result;
}
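
The bucket back-fill above combines three HashMap operations: keySet().iterator().next() and values().iterator().next() borrow an arbitrary existing entry as a naming template, and containsKey(...) then flags the bucket IDs that are absent. A condensed sketch of that detection step, where replaceTaskId is a simplified hypothetical stand-in for Hive's helper and the key format is invented for the example:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

public class MissingBucketSketch {
    // Hypothetical stand-in: swap the numeric suffix of a simplified task ID.
    static String replaceTaskId(String taskId, int bucket) {
        return taskId.substring(0, taskId.lastIndexOf('_') + 1) + bucket;
    }

    public static void main(String[] args) {
        HashMap<String, String> taskIDToFile = new HashMap<>();
        taskIDToFile.put("task_0", "/warehouse/t/task_0");
        taskIDToFile.put("task_2", "/warehouse/t/task_2");

        // Borrow any existing task ID as the naming template.
        String taskID1 = taskIDToFile.keySet().iterator().next();

        int numBuckets = 4;
        List<String> emptyBuckets = new ArrayList<>();
        for (int j = 0; j < numBuckets; j++) {
            String taskID2 = replaceTaskId(taskID1, j);
            if (!taskIDToFile.containsKey(taskID2)) {
                emptyBuckets.add(taskID2); // bucket j needs an empty file
            }
        }
        System.out.println(emptyBuckets); // [task_1, task_3]
    }
}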

From source file:edu.ku.brc.dbsupport.MySQLDMBSUserMgr.java

/**
 * Queries the MySQL privilege tables for the access rights granted to a user.
 * @param username the MySQL user name to look up
 * @param dbName the database name (used by the legacy fallback lookup)
 * @param doAccess true to read per-database grants from mysql.db, false to read global grants from mysql.user
 * @return a bit mask of PERM_* flags, or PERM_NONE if no connection or no grants were found
 */
private int getPermissionsFromMySQL(final String username, final String dbName, final boolean doAccess) {
    if (connection != null) {
        if (username.equalsIgnoreCase("root")) {
            return PERM_ALL;
        }

        StringBuilder debugLines = new StringBuilder();
        boolean doDebugPerms = AppPreferences.getLocalPrefs().getBoolean("DEBUG_IT_PERMS", false);

        String[] permNames = new String[] { "Select", "Insert", "Update", "Delete", "Lock_tables", "Alter",
                "Create", "Drop" };
        int permLen = doAccess ? 5 : permNames.length;

        StringBuilder sb = new StringBuilder("SELECT host `Host`");
        if (doAccess) {
            sb.append(", user `USER`, db `Database`");
        }
        sb.append(", REPLACE(RTRIM(CONCAT(");
        for (int i = 0; i < permLen; i++) {
            String perm = permNames[i];
            if (i > 0)
                sb.append(',');
            if (doDebugPerms)
                sb.append("\n");
            sb.append(String.format("IF(%s_priv = 'Y', '%s ', '') ", perm, perm));
        }
        sb.append(")), ' ', ', ') AS `Privileges` FROM ");
        if (doAccess) {
            sb.append(String.format("mysql.db WHERE User = '%s' ORDER BY Host, User, Db", username));
        } else {
            sb.append(String.format("mysql.user WHERE User = '%s' ORDER BY Host, User", username));
        }

        log.debug(sb.toString());

        if (doDebugPerms) {
            debugLines.append(sb.toString() + "\n\n");
        }

        HashMap<String, Integer> nameToPerm = new HashMap<String, Integer>();
        for (int i = 0; i < permLen; i++) {
            nameToPerm.put(permNames[i], PERM_LIST[i]);
        }

        HashMap<String, Integer> hostHash = new HashMap<String, Integer>();
        BasicSQLUtils.setSkipTrackExceptions(true);
        Vector<Object[]> rows = BasicSQLUtils.query(connection, sb.toString());
        BasicSQLUtils.setSkipTrackExceptions(false);

        for (Object[] row : rows) {
            String host = (String) row[0];
            if (StringUtils.isNotEmpty(host)) {
                String permStr = (String) row[doAccess ? 3 : 1];
                if (StringUtils.isNotEmpty(permStr)) {
                    int perms = PERM_NONE;
                    String[] privs = StringUtils.split(permStr, ',');
                    if (privs != null && privs.length > 0) {
                        for (String p : privs) {
                            Integer prm = nameToPerm.get(p.trim());
                            if (prm != null) {
                                debugLines.append("Adding Perm: " + p.trim() + "\n");
                                perms |= prm;
                            }
                        }
                    }
                    if (doDebugPerms) {
                        debugLines.append("Host: [" + host + "]\n\n");
                    }
                    hostHash.put(host, perms);
                }
            }
        }

        if (doDebugPerms) {
            debugLines.append("hostHash.size(): " + hostHash.size() + "\n");

            int maxPerms = PERM_NONE;
            for (String key : hostHash.keySet()) {
                Integer p = hostHash.get(key);
                debugLines.append("Key/Val: [" + key + "][" + p + "]\n");
                if (p != null && p > maxPerms) {
                    maxPerms = p;
                }
            }
            debugLines.append("maxPerms: " + maxPerms + "\n");

            JTextArea ta = UIHelper.createTextArea();
            ta.setText(debugLines.toString());

            JPanel p = new JPanel(new BorderLayout());
            p.setBorder(BorderFactory.createEmptyBorder(8, 8, 8, 8));
            p.add(UIHelper.createScrollPane(ta, true), BorderLayout.CENTER);

            CustomDialog dlg = new CustomDialog((Frame) null, "Debug", true, CustomDialog.OK_BTN, p);
            dlg.setOkLabel("Close");
            UIHelper.centerAndShow(dlg);
        }

        // If there is only one host entry, use it.
        if (hostHash.size() == 1) {
            return hostHash.values().iterator().next();
        }

        // Otherwise, take the best (numerically largest) perms across all hosts.
        int maxPerms = PERM_NONE;
        for (Integer p : hostHash.values()) {
            if (p > maxPerms) {
                maxPerms = p;
            }
        }

        // Legacy fallback; this lookup should eventually be removed.
        if (maxPerms == PERM_NONE) {
            maxPerms = getPermissionsFromUserTable(username, dbName);
            if (maxPerms != PERM_NONE) {
                UsageTracker.incrUsageCount("OLD_IT_PERMS_WORKED");
            }
        }

        return maxPerms;
    }

    return PERM_NONE;
}
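
When multiple host rows carry different grants, the method scans hostHash.values() and keeps the numerically largest bit mask; note that the plain > comparison treats the mask as a magnitude rather than as a privilege-superset test, exactly as the original does. A minimal sketch under those same assumptions, with illustrative PERM_* flags standing in for the real constants:

import java.util.HashMap;

public class BestHostPerms {
    // Illustrative bit flags; the real PERM_* constants live in MySQLDMBSUserMgr.
    static final int PERM_NONE = 0, PERM_SELECT = 1, PERM_INSERT = 2, PERM_UPDATE = 4;

    public static void main(String[] args) {
        HashMap<String, Integer> hostHash = new HashMap<>();
        hostHash.put("localhost", PERM_SELECT | PERM_INSERT); // 3
        hostHash.put("%", PERM_SELECT | PERM_UPDATE);         // 5

        // Keep the largest mask seen across all hosts.
        int maxPerms = PERM_NONE;
        for (Integer p : hostHash.values()) {
            if (p > maxPerms) {
                maxPerms = p;
            }
        }
        System.out.println(maxPerms); // 5
    }
}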

From source file:com.sec.ose.osi.localdb.identification.IdentificationDBManager.java

synchronized public static void updateCodeMatchFolderSummary(String projectName, String pPath,
        FolderSummary folderSummary) {
    long startTime = Time.startTime("updateCodeMatchFolderSummary");
    folderSummary.clear();

    ResultSet rs1 = null;
    try {
        String sqlForIdentified = TableIdentifiedFile.getQueryForIdentifiedListByFolder(projectName, pPath);
        log.debug("CodeMatchFolder ~~~ sqlForIdentified: " + sqlForIdentified);
        rs1 = selectSQL(sqlForIdentified);
        HashMap<String, TableIdentifiedFile> identifiedFileMap = new HashMap<String, TableIdentifiedFile>();
        // key = filepath+"-"+matchedFilePathInServer

        while (rs1.next()) {
            TableIdentifiedFile identifiedFile = new TableIdentifiedFile(
                    rs1.getString(TableIdentifiedFile.PATH), rs1.getInt(TableIdentifiedFile.TYPE),
                    rs1.getString(TableIdentifiedFile.COMPONENT), rs1.getString(TableIdentifiedFile.VERSION),
                    rs1.getString(TableIdentifiedFile.LICENSE),
                    rs1.getString(TableIdentifiedFile.MATCHED_FILE));
            String key = identifiedFile.getPath() + "-" + identifiedFile.getMatchedFile();
            identifiedFileMap.put(key, identifiedFile);
        }
        rs1.close();

        String sqlForCodeMatch = TableCodeMatch.getQueryForCodeMatchlistByFolder(projectName, pPath);

        log.debug("CodeMatchFolder ~~~ sqlForCodeMatch: " + sqlForCodeMatch);
        rs1 = selectSQL(sqlForCodeMatch);

        HashMap<String, CodeMatchInfoForFolder> codeMatchInfoForFolderMap = new HashMap<String, CodeMatchInfoForFolder>();
        // key = component+version+license+identifiedComponent+identifiedVersion+identifiedLicense

        while (rs1.next()) {
            CodeMatchInfoForFolder codeMatchFolder = new CodeMatchInfoForFolder(
                    rs1.getString(TableCodeMatch.PATH), rs1.getString(TableCodeMatch.COMPONENT),
                    rs1.getString(TableCodeMatch.VERSION), rs1.getString(TableCodeMatch.LICENSE),
                    rs1.getInt(TableCodeMatch.USAGE), rs1.getInt(TableCodeMatch.STATUS),
                    rs1.getInt(TableCodeMatch.PERCENTAGE), rs1.getString(TableCodeMatch.MATCHED_FILE),
                    rs1.getString(TableCodeMatch.COMMENT));

            String identifiedComponent = null;
            String identifiedVersion = null;
            String identifiedLicense = null;
            String key = codeMatchFolder.getPath() + "-" + codeMatchFolder.getMatchedFile();
            if (identifiedFileMap.containsKey(key)) {
                identifiedComponent = identifiedFileMap.get(key).getComponent();
                identifiedVersion = identifiedFileMap.get(key).getVersion();
                identifiedLicense = identifiedFileMap.get(key).getLicense();

            }
            String key2 = codeMatchFolder.getComponentName() + codeMatchFolder.getVersionName()
                    + codeMatchFolder.getLicenseName() + identifiedComponent + identifiedVersion
                    + identifiedLicense;

            if (!codeMatchInfoForFolderMap.containsKey(key2)) {
                CodeMatchInfoForFolder codeMatchInfoForFolder = new CodeMatchInfoForFolder(
                        codeMatchFolder.getComponentName(), codeMatchFolder.getVersionName(),
                        codeMatchFolder.getLicenseName(), 0, 0, 0, identifiedComponent, identifiedVersion,
                        identifiedLicense);
                codeMatchInfoForFolderMap.put(key2, codeMatchInfoForFolder);
            }
            CodeMatchInfoForFolder codeMatchInfoForFolder = codeMatchInfoForFolderMap.get(key2);
            switch (codeMatchFolder.getStatus()) {

            case TableCodeMatch.CODE_MATCH_TABLE_STATUS_PENDING:
                codeMatchInfoForFolder.increasePendingSnippetCount();
                break;

            case TableCodeMatch.CODE_MATCH_TABLE_STATUS_IDENTIFIED:
                codeMatchInfoForFolder.increaseIdentifiedSnippetCount();
                break;

            case TableCodeMatch.CODE_MATCH_TABLE_STATUS_DECLARED:
                codeMatchInfoForFolder.increaseDeclaredSnippetCount();
                break;
            }
        }

        for (CodeMatchInfoForFolder codeMatchInfoForFolder : codeMatchInfoForFolderMap.values()) {
            folderSummary.addCodeMatchInforForFolderList(codeMatchInfoForFolder);
        }
        rs1.close();

    } catch (Exception e) {
        log.error("updateCodeMatchFolderSummary failed", e);
    } finally {
        Time.endTime("updateCodeMatchFolderSummary", startTime);
        if (rs1 != null) {
            try {
                rs1.close();
            } catch (Exception e) {
                log.debug(e);
            }
        }
    }
}
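
Structurally, the method is a group-by over result-set rows: a composite string key selects (or lazily creates) an accumulator in a HashMap, per-row counters are bumped, and the finished groups are drained through values(). A compact sketch of that shape, using a hypothetical one-counter accumulator in place of CodeMatchInfoForFolder:

import java.util.HashMap;

public class GroupBySketch {
    // Hypothetical accumulator; the real CodeMatchInfoForFolder tracks several counters.
    static class Group {
        final String key;
        int pendingCount;
        Group(String key) { this.key = key; }
    }

    public static void main(String[] args) {
        String[][] rows = { { "compA", "1.0" }, { "compA", "1.0" }, { "compB", "2.0" } };

        HashMap<String, Group> groups = new HashMap<>();
        for (String[] row : rows) {
            String key = row[0] + row[1]; // composite key, as in the method above
            groups.computeIfAbsent(key, Group::new).pendingCount++;
        }

        // Drain the grouped results via values(); iteration order is unspecified.
        for (Group g : groups.values()) {
            System.out.println(g.key + " -> " + g.pendingCount);
        }
    }
}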

From source file:au.org.theark.core.dao.DataExtractionDao.java

public File createMegaCSV(Search search, DataExtractionVO allTheData, List<DemographicField> allSubjectFields,
        List<CustomFieldDisplay> biocollectionCustomFieldDisplay,
        List<CustomFieldDisplay> biospecimenCustomFieldDisplay,
        List<PhenoDataSetFieldDisplay> phenotypicCustomFieldDisplay,
        List<ConsentStatusField> consentStatusFields) {
    final String tempDir = System.getProperty("java.io.tmpdir");
    String filename = "COMBINED.csv";
    final java.io.File file = new File(tempDir, filename);

    long start = System.currentTimeMillis();

    //Add geno later

    Set<String> headers = new HashSet<String>();
    HashMap<String, List<String>> biospecimenMapping = new HashMap<String, List<String>>();
    HashMap<String, List<String>> biocollectionMapping = new HashMap<String, List<String>>();
    HashMap<String, List<String>> phenoCollectionMapping = new HashMap<String, List<String>>();

    for (Entry<String, ExtractionVO> entry : allTheData.getDemographicData().entrySet()) {
        headers.addAll(entry.getValue().getKeyValues().keySet());
    }

    for (Entry<String, ExtractionVO> entry : allTheData.getBiospecimenData().entrySet()) {
        String subjectUID = entry.getValue().getSubjectUid();
        if (biospecimenMapping.containsKey(subjectUID)) {
            biospecimenMapping.get(subjectUID).add(entry.getKey());
        } else {
            List<String> biospecimenUIDs = new ArrayList<String>();
            biospecimenUIDs.add(entry.getKey());
            biospecimenMapping.put(subjectUID, biospecimenUIDs);
        }
    }

    for (Entry<String, ExtractionVO> entry : allTheData.getBiocollectionData().entrySet()) {
        String subjectUID = entry.getValue().getSubjectUid();
        if (biocollectionMapping.containsKey(subjectUID)) {
            biocollectionMapping.get(subjectUID).add(entry.getKey());
        } else {
            List<String> biocollectionUIDs = new ArrayList<String>();
            biocollectionUIDs.add(entry.getKey());
            biocollectionMapping.put(subjectUID, biocollectionUIDs);
        }
    }

    for (Entry<String, ExtractionVO> entry : allTheData.getPhenoCustomData().entrySet()) {
        String subjectUID = entry.getValue().getSubjectUid();
        if (phenoCollectionMapping.containsKey(subjectUID)) {
            phenoCollectionMapping.get(subjectUID).add(entry.getKey());
        } else {
            List<String> phenoCollectionIDs = new ArrayList<String>();
            phenoCollectionIDs.add(entry.getKey());
            phenoCollectionMapping.put(subjectUID, phenoCollectionIDs);
        }
    }

    for (Entry<String, ExtractionVO> entry : allTheData.getSubjectCustomData().entrySet()) {
        headers.addAll(entry.getValue().getKeyValues().keySet());
    }

    //Biospecimens
    Set<String> biospecimenHeadersSet = new HashSet<String>();
    int maxBiospecimens = 0;
    for (List<String> bs : biospecimenMapping.values()) {
        if (bs.size() > maxBiospecimens) {
            maxBiospecimens = bs.size();
        }
    }

    for (ExtractionVO evo : allTheData.getBiospecimenData().values()) {
        biospecimenHeadersSet.addAll(evo.getKeyValues().keySet());
    }

    for (ExtractionVO evo : allTheData.getBiospecimenCustomData().values()) {
        biospecimenHeadersSet.addAll(evo.getKeyValues().keySet());
    }

    //Biocollections
    Set<String> biocollectionHeadersSet = new HashSet<String>();
    int maxBiocollections = 0;
    for (List<String> bc : biocollectionMapping.values()) {
        if (bc.size() > maxBiocollections) {
            maxBiocollections = bc.size();
        }
    }

    for (ExtractionVO evo : allTheData.getBiocollectionData().values()) {
        biocollectionHeadersSet.addAll(evo.getKeyValues().keySet());
    }

    for (ExtractionVO evo : allTheData.getBiocollectionCustomData().values()) {
        biocollectionHeadersSet.addAll(evo.getKeyValues().keySet());
    }

    //Phenocollections
    Set<String> phenoCollectionHeadersSet = new HashSet<String>();
    int maxPhenoCollections = 0;
    for (List<String> pc : phenoCollectionMapping.values()) {
        if (pc.size() > maxPhenoCollections) {
            maxPhenoCollections = pc.size();
        }
    }

    for (ExtractionVO evo : allTheData.getPhenoCustomData().values()) {
        phenoCollectionHeadersSet.addAll(evo.getKeyValues().keySet());
    }

    List<String> biospecimenHeaders = new ArrayList<String>(biospecimenHeadersSet);
    List<String> biocollectionHeaders = new ArrayList<String>(biocollectionHeadersSet);
    List<String> phenoCollectionHeaders = new ArrayList<String>(phenoCollectionHeadersSet);

    List<String> headersList = new ArrayList<String>(headers);
    Collections.sort(headersList);
    Collections.sort(biocollectionHeaders);
    Collections.sort(biospecimenHeaders);
    Collections.sort(phenoCollectionHeaders);

    biospecimenHeaders.add(0, "Biospecimen UID");
    biocollectionHeaders.add(0, "Biocollection UID");
    phenoCollectionHeaders.add(0, "Record Date");

    OutputStream outputStream;
    try {
        outputStream = new FileOutputStream(file);
        CsvWriter csv = new CsvWriter(outputStream);

        csv.write("Subject UID");

        for (String header : headersList) {
            csv.write(header);
        }

        for (int i = 1; i <= maxBiospecimens; i++) {
            for (String header : biospecimenHeaders) {
                csv.write("BS" + i + "_" + header);
            }
        }

        for (int i = 1; i <= maxBiocollections; i++) {
            for (String header : biocollectionHeaders) {
                csv.write("BC" + i + "_" + header);
            }
        }

        for (int i = 1; i <= maxPhenoCollections; i++) {
            for (String header : phenoCollectionHeaders) {
                csv.write("P" + i + "_" + header);
            }
        }

        csv.endLine();

        for (String subjectUID : allTheData.getDemographicData().keySet()) {
            csv.write(subjectUID);

            ExtractionVO subjectData = allTheData.getDemographicData().get(subjectUID);
            ExtractionVO subjectCustomData = allTheData.getSubjectCustomData().get(subjectUID);
            for (String header : headersList) {
                if (subjectData.getKeyValues().containsKey(header)) {
                    csv.write(subjectData.getKeyValues().get(header));
                } else if (subjectCustomData != null && subjectCustomData.getKeyValues().containsKey(header)) {
                    csv.write(subjectCustomData.getKeyValues().get(header));
                } else {
                    csv.write("");
                }
            }
            if (biospecimenMapping.containsKey(subjectUID)) {
                for (String biospecimenUID : biospecimenMapping.get(subjectUID)) {
                    ExtractionVO biospecimenData = allTheData.getBiospecimenData().get(biospecimenUID);
                    ExtractionVO biospecimenCustomData = allTheData.getBiospecimenCustomData()
                            .get(biospecimenUID);
                    for (String header : biospecimenHeaders) {
                        if (header.equals("Biospecimen UID")) {
                            csv.write(biospecimenUID);
                        } else if (biospecimenData.getKeyValues().containsKey(header)) {
                            csv.write(biospecimenData.getKeyValues().get(header));
                        } else if (biospecimenCustomData != null
                                && biospecimenCustomData.getKeyValues().containsKey(header)) {
                            csv.write(biospecimenCustomData.getKeyValues().get(header));
                        } else {
                            csv.write("");
                        }
                    }
                }
                //Inserting empty cells where subject has fewer biospecimens than the max
                if (biospecimenMapping.get(subjectUID).size() < maxBiospecimens) {
                    for (int i = 0; i < (maxBiospecimens - biospecimenMapping.get(subjectUID).size()); i++) {
                        for (String header : biospecimenHeaders) {
                            csv.write("");
                        }
                    }
                }
            } else {
                for (int i = 0; i < maxBiospecimens; i++) {
                    for (String header : biospecimenHeaders) {
                        csv.write("");
                    }
                }
            }

            if (biocollectionMapping.containsKey(subjectUID)) {
                for (String biocollectionUID : biocollectionMapping.get(subjectUID)) {
                    ExtractionVO biocollectionData = allTheData.getBiocollectionData().get(biocollectionUID);
                    ExtractionVO biocollectionCustomData = allTheData.getBiocollectionCustomData()
                            .get(biocollectionUID);
                    for (String header : biocollectionHeaders) {
                        if (header.equals("Biocollection UID")) {
                            csv.write(biocollectionUID);
                        } else if (biocollectionData.getKeyValues().containsKey(header)) {
                            csv.write(biocollectionData.getKeyValues().get(header));
                        } else if (biocollectionCustomData != null
                                && biocollectionCustomData.getKeyValues().containsKey(header)) {
                            csv.write(biocollectionCustomData.getKeyValues().get(header));
                        } else {
                            csv.write("");
                        }
                    }
                }

                //Inserting empty cells where subject has fewer biocollections than the max
                if (biocollectionMapping.get(subjectUID).size() < maxBiocollections) {
                    for (int i = 0; i < (maxBiocollections
                            - biocollectionMapping.get(subjectUID).size()); i++) {
                        for (String header : biocollectionHeaders) {
                            csv.write("");
                        }
                    }
                }
            } else {
                for (int i = 0; i < maxBiocollections; i++) {
                    for (String header : biocollectionHeaders) {
                        csv.write("");
                    }
                }
            }

            if (phenoCollectionMapping.containsKey(subjectUID)) {
                DateFormat df = new SimpleDateFormat("MM/dd/yyyy");
                for (String phenoCollectionID : phenoCollectionMapping.get(subjectUID)) {
                    ExtractionVO phenoCollectionData = allTheData.getPhenoCustomData().get(phenoCollectionID);
                    for (String header : phenoCollectionHeaders) {
                        if (header.equals("Record Date")) {
                            csv.write(df.format(phenoCollectionData.getRecordDate()));
                        } else if (phenoCollectionData.getKeyValues().containsKey(header)) {
                            csv.write(phenoCollectionData.getKeyValues().get(header));
                        } else {
                            csv.write("");
                        }
                    }
                }
                if (phenoCollectionMapping.get(subjectUID).size() < maxPhenoCollections) {
                    for (int i = 0; i < (maxPhenoCollections
                            - phenoCollectionMapping.get(subjectUID).size()); i++) {
                        for (String header : phenoCollectionHeaders) {
                            csv.write("");
                        }
                    }
                }
            } else {
                for (int i = 0; i < maxPhenoCollections; i++) {
                    for (String header : phenoCollectionHeaders) {
                        csv.write("");
                    }
                }
            }
            csv.endLine();
        }

        csv.close();

    } catch (Exception e) {
        log.error("Failed to create combined CSV file", e);
    }

    log.info("taken " + (System.currentTimeMillis() - start));
    return file;
}
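
The column padding in the CSV above hinges on one values() pass per mapping: the longest value list determines how many column groups every subject gets, and shorter subjects are padded with empty cells. The sketch below isolates that max-cardinality step; the map contents are illustrative:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;

public class MaxCardinality {
    public static void main(String[] args) {
        HashMap<String, List<String>> biospecimenMapping = new HashMap<>();
        biospecimenMapping.put("SUBJ-1", Arrays.asList("BS-1", "BS-2"));
        biospecimenMapping.put("SUBJ-2", Arrays.asList("BS-3"));

        // The longest list across all subjects sets the column-group count.
        int maxBiospecimens = 0;
        for (List<String> bs : biospecimenMapping.values()) {
            if (bs.size() > maxBiospecimens) {
                maxBiospecimens = bs.size();
            }
        }
        System.out.println(maxBiospecimens); // 2
    }
}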