Example usage for java.util Set clear

List of usage examples for java.util Set clear

Introduction

On this page you can find example usages of java.util.Set.clear().

Prototype

void clear();

Document

Removes all of the elements from this set (optional operation).
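
A minimal sketch of the contract (class and variable names here are illustrative, not from any of the projects below): clear() empties the receiver in place, and because it is an optional operation, unmodifiable sets reject it.

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SetClearDemo {
    public static void main(String[] args) {
        Set<String> names = new HashSet<String>();
        names.add("alpha");
        names.add("beta");

        names.clear();                       // removes every element in place
        System.out.println(names.isEmpty()); // true

        // clear() is optional: an unmodifiable view throws unconditionally.
        Set<String> frozen = Collections.unmodifiableSet(names);
        try {
            frozen.clear();
        } catch (UnsupportedOperationException e) {
            System.out.println("clear() not supported on an unmodifiable set");
        }
    }
}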

Usage

From source file:org.apache.hadoop.hbase.master.RegionManager.java

/**
 * Get the set of regions that should be assignable in this pass.
 *
 * Note that no synchronization on regionsInTransition is needed because the
 * only caller (assignRegions, whose caller is ServerManager.processMsgs) owns
 * the monitor for RegionManager.
 */
private Set<RegionState> regionsAwaitingAssignment(HServerInfo server) {
    // set of regions we want to assign to this server
    Set<RegionState> regionsToAssign = new HashSet<RegionState>();
    boolean isSingleServer = isSingleRegionServer();
    HServerAddress addr = server.getServerAddress();
    boolean isMetaServer = isMetaServer(addr);
    RegionState rootState = null;
    boolean isPreferredAssignment = false;
    boolean reassigningMetas = (numberOfMetaRegions.get() > onlineMetaRegions.size());
    boolean isMetaOrRoot = isMetaServer || isRootServer(addr);

    // Assign ROOT region if ROOT region is offline.
    synchronized (this.regionsInTransition) {
        rootState = regionsInTransition.get(HRegionInfo.ROOT_REGIONINFO.getRegionNameAsString());
    }
    if (rootState != null && rootState.isUnassigned()) {
        // just make sure it isn't hosting META regions (unless
        // it's the only server left).
        if (!isMetaServer || isSingleServer) {
            regionsToAssign.add(rootState);
            LOG.debug("Going to assign -ROOT- region to server " + server.getHostnamePort());
        }
        return regionsToAssign;
    }

    // Don't assign META to this server if it has already hosted ROOT or META regions
    if (isMetaOrRoot && reassigningMetas && !isSingleServer) {
        return regionsToAssign;
    }

    // Get the set of the regions which are preserved
    // for the current region server
    Set<HRegionInfo> preservedRegionsForCurrentRS = assignmentManager.getTransientAssignments(addr);

    synchronized (this.regionsInTransition) {
        int nonPreferredAssignment = 0;
        for (RegionState regionState : regionsInTransition.values()) {
            HRegionInfo regionInfo = regionState.getRegionInfo();
            if (regionInfo == null)
                continue;
            if (regionInfo.isRootRegion() && !regionState.isUnassigned()) {
                LOG.debug("The -ROOT- region" + " has been assigned and will be online soon. "
                        + "Do nothing for server " + server.getHostnamePort());
                break;
            }
            // Assign the META region here explicitly
            if (regionInfo.isMetaRegion()) {
                if (regionState.isUnassigned()) {
                    regionsToAssign.clear();
                    regionsToAssign.add(regionState);
                    LOG.debug("Going to assign META region: " + regionInfo.getRegionNameAsString()
                            + " to server " + server.getHostnamePort());
                } else {
                    LOG.debug("The .META. region " + regionInfo.getRegionNameAsString()
                            + " has been assigned and will be online soon. " + "Do nothing for server "
                            + server.getHostnamePort());
                }
                break;
            }

            // Can't assign user regions until all meta regions have been assigned,
            // the initial meta scan is done and there are enough online
            // region servers
            if (reassigningMetas || !master.getServerManager().hasEnoughRegionServers()) {
                LOG.debug("Cannot assign region " + regionInfo.getRegionNameAsString()
                        + " because not all the META are online, "
                        + "or the initial META scan is not completed, or there are no "
                        + "enough online region servers");
                continue;
            }

            // Cannot assign region which is NOT in the unassigned state
            if (!regionState.isUnassigned()) {
                continue;
            }

            if (preservedRegionsForCurrentRS == null || !preservedRegionsForCurrentRS.contains(regionInfo)) {
                if (assignmentManager.hasTransientAssignment(regionInfo)
                        || nonPreferredAssignment > this.maxAssignInOneGo) {
                    // Hold the region for its favored nodes and limit the number of
                    // non-preferred assignments for each region server.
                    continue;
                }
                // This is a non-preferred assignment.
                isPreferredAssignment = false;
                nonPreferredAssignment++;
            } else {
                isPreferredAssignment = true;
            }

            // Assign the current region to the region server.
            regionsToAssign.add(regionState);
            LOG.debug("Going to assign user region " + regionInfo.getRegionNameAsString() + " to server "
                    + server.getHostnamePort() + " in a " + (isPreferredAssignment ? "" : "non-")
                    + "preferred way");

        }
    }
    return regionsToAssign;
}
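
The clear() call in the META branch above is a clear-then-add priority pattern: any user regions already collected are discarded so the returned set contains only the unassigned META region. A minimal sketch of the pattern, with a hypothetical priority test standing in for isMetaRegion():

import java.util.HashSet;
import java.util.Set;

public class PrioritySelection {
    // Collect ordinary items, but if a priority item appears, clear
    // everything gathered so far and return only that item.
    static Set<String> select(Iterable<String> candidates) {
        Set<String> selected = new HashSet<String>();
        for (String item : candidates) {
            if (item.startsWith("META")) { // hypothetical priority test
                selected.clear();          // drop everything collected so far
                selected.add(item);
                break;                     // assign the priority item alone
            }
            selected.add(item);
        }
        return selected;
    }
}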

From source file:gov.nih.nci.cabig.caaers.rules.business.service.EvaluationServiceImpl.java

/**
 * This method invokes the {@link AdverseEventEvaluationService} to obtain the suggested report definitions,
 * then processes that information to build the adverse event result {@link EvaluationResultDTO}.
 *
 * Overview of the extra processing:
 *   0. Ignore all the 'soft deleted' reports suggested by the rules engine.
 *   1. If a child report or a report of the same group is active, the parent report suggested by rules is ignored.
 *   2. All manually selected active reports are suggested by caAERS.
 *   3. If there is a manual selection, ignore the others suggested by rules.
 *   4. If an AE that is part of a submitted report was modified, force an amendment.
 *   5. Withdraw all active reports (not manually selected) that are no longer suggested.
 *
 * @param aeReport - The {@link ExpeditedAdverseEventReport}
 */
public void findRequiredReportDefinitions(ExpeditedAdverseEventReport aeReport, List<AdverseEvent> aeList,
        Study study, EvaluationResultDTO evaluationResult) {
    Map<AdverseEvent, List<ReportDefinition>> adverseEventRecommendedReportsMap = new HashMap<AdverseEvent, List<ReportDefinition>>();

    List<AdverseEvent> deletedAeList = new ArrayList<AdverseEvent>();
    List<AdverseEvent> newAeList = new ArrayList<AdverseEvent>();
    List<AdverseEvent> modifiedAeList = new ArrayList<AdverseEvent>();
    List<AdverseEvent> evaluatableAeList = new ArrayList<AdverseEvent>();
    for (AdverseEvent ae : aeList) {
        if (ae.isRetired()) {
            deletedAeList.add(ae);
        } else if (ae.getReport() == null) {
            newAeList.add(ae);
        } else {
            modifiedAeList.add(ae);
        }
    }

    evaluatableAeList.addAll(modifiedAeList);
    evaluatableAeList.addAll(newAeList);

    ExpeditedAdverseEventReport expeditedData = aeReport.getId() == null ? null : aeReport;
    // holds the report definitions while cleaning up
    Map<String, ReportDefinition> loadedReportDefinitionsMap = new HashMap<String, ReportDefinition>();

    Map<AdverseEvent, List<AdverseEventEvaluationResult>> adverseEventEvaluationResultMap;
    Map<AdverseEvent, List<String>> map;

    boolean alertNeeded = false;
    Integer aeReportId = expeditedData == null ? new Integer(0) : expeditedData.getId();
    try {
        //evaluate the SAE reporting rules
        adverseEventEvaluationResultMap = adverseEventEvaluationService.evaluateSAEReportSchedule(aeReport,
                evaluatableAeList, study);
        evaluationResult.getRulesEngineRawResultMap().put(aeReportId, adverseEventEvaluationResultMap);
        map = new HashMap<AdverseEvent, List<String>>();

        // clear the recommended reports map
        adverseEventRecommendedReportsMap.clear();

        //clean up - by eliminating the deleted report definitions.
        for (Map.Entry<AdverseEvent, List<AdverseEventEvaluationResult>> entry : adverseEventEvaluationResultMap
                .entrySet()) {
            Set<String> rdNameSet = new HashSet<String>();
            AdverseEvent adverseEvent = entry.getKey();
            Set<ReportDefinition> recommendedAeReports = new HashSet<ReportDefinition>();
            for (AdverseEventEvaluationResult aeEvalResult : entry.getValue()) {
                for (String response : aeEvalResult.getRuleEvaluationResult().getResponses()) {
                    if (!StringUtils.isBlank(response)) {
                        ReportDefinition rd = reportDefinitionDao.getByName(response);
                        if (rd != null) {
                            recommendedAeReports.add(rd);
                        }
                    }
                }
            }
            adverseEventRecommendedReportsMap.put(adverseEvent,
                    new ArrayList<ReportDefinition>(recommendedAeReports));

            List<String> validReportDefNames = new ArrayList<String>();
            map.put(adverseEvent, validReportDefNames);
            evaluationResult.addProcessingStep(aeReportId, "RulesEngine: Evaluation for adverse event ("
                    + AdverseEvent.toReadableString(adverseEvent) + ") :", null);
            for (AdverseEventEvaluationResult adverseEventEvaluationResult : entry.getValue()) {
                evaluationResult.addProcessingStep(aeReportId, " RuleSet:",
                        adverseEventEvaluationResult.getRuleMetadata());
                evaluationResult.addProcessingStep(aeReportId, " Raw message :",
                        adverseEventEvaluationResult.getMessage());
                if (adverseEventEvaluationResult.getRuleEvaluationResult() != null) {
                    evaluationResult.addProcessingStep(aeReportId, " Bind URL :",
                            adverseEventEvaluationResult.getRuleEvaluationResult().getBindURI());
                    evaluationResult.addProcessingStep(aeReportId, " Matched rules :",
                            adverseEventEvaluationResult.getRuleEvaluationResult().getMatchedRules()
                                    .toString());
                    for (String note : adverseEventEvaluationResult.getNotes()) {
                        evaluationResult.addProcessingStep(aeReportId, "  Notes: ", note);
                    }
                    evaluationResult.addProcessingStep(aeReportId, " Matched rules :",
                            adverseEventEvaluationResult.getRuleEvaluationResult().getMatchedRules()
                                    .toString());

                } else {
                    evaluationResult.addProcessingStep(aeReportId, " Bind URL :", null);
                    evaluationResult.addProcessingStep(aeReportId, " Matched rules :", null);
                }

                if (adverseEventEvaluationResult.isCannotDetermine()
                        || adverseEventEvaluationResult.isNoRulesFound())
                    continue;

                evaluationResult.addProcessingStep(aeReportId, " Raw suggestions :",
                        adverseEventEvaluationResult.getRuleEvaluationResult().getResponses().toString());

                rdNameSet.addAll(adverseEventEvaluationResult.getRuleEvaluationResult().getResponses());
            }

            //CAAERS-5702
            if (rdNameSet.contains("IGNORE")) {
                rdNameSet.clear();
                evaluationResult.addProcessingStep(aeReportId,
                        "caAERS : Protocol specific exception, so removing all recommendations", "");
            }

            for (String reportDefName : rdNameSet) {
                ReportDefinition rd = loadedReportDefinitionsMap.get(reportDefName);
                if (rd == null) {
                    rd = reportDefinitionDao.getByName(reportDefName);
                    if (rd == null) {
                        evaluationResult.addProcessingStep(aeReportId, "report definition missing in database ",
                                reportDefName);
                        log.warn("Report definition (" + reportDefName
                                + "), is referred in rules but is not found");
                        continue; //we cannot find the report referred by the rule
                    }
                    if (rd.getEnabled()) {
                        loadedReportDefinitionsMap.put(reportDefName, rd);
                    } else {
                        log.debug("Ignoring Report definition [" + reportDefName + "] as it is disabled");
                    }
                }

                if (rd.getEnabled()) {
                    validReportDefNames.add(reportDefName);
                }

            }
            evaluationResult.addProcessingStep(aeReportId, "caAERS : Plausible suggestions :",
                    validReportDefNames.toString());
            evaluationResult.addProcessingStep(aeReportId, " ", null);

        }

        for (Map.Entry<AdverseEvent, List<ReportDefinition>> entry : adverseEventRecommendedReportsMap
                .entrySet()) {
            List<ReportDefinition> filteredRdList = reportDefinitionFilter.filter(entry.getValue());
            entry.setValue(filteredRdList);
        }

        //save this for reference.
        evaluationResult.addRulesEngineResult(aeReportId, map);

        //now load report definitions
        List<ReportDefinition> defList = new ArrayList<ReportDefinition>();
        defList.addAll(loadedReportDefinitionsMap.values());

        List<Report> completedReports = expeditedData == null ? new ArrayList<Report>()
                : expeditedData.listReportsHavingStatus(ReportStatus.COMPLETED);

        // Remove all NOTIFICATIONS from completed reports, as notifications must be completed by a subsequent full report.
        List<Report> notificationsToRemove = new ArrayList<Report>();
        for (Report report : completedReports) {
            List<ReportDefinition> rdList = ReportDefinition.findByName(defList, report.getName());
            if (!rdList.isEmpty() && rdList.get(0).getReportType() == ReportType.NOTIFICATION) {
                notificationsToRemove.add(report);
            }
        }
        completedReports.removeAll(notificationsToRemove);

        if (!completedReports.isEmpty()) {

            for (AdverseEvent adverseEvent : evaluatableAeList) {

                if (adverseEvent.getReport() == null)
                    continue; // unreported AE, skip

                List<String> nameList = map.get(adverseEvent);

                if (adverseEvent.isModified()) {
                    //throw away notifications if AE is already reported.
                    for (Report report : completedReports) {
                        if (report.isReported(adverseEvent)) {
                            List<ReportDefinition> rdList = ReportDefinition.findByName(defList,
                                    nameList.toArray(new String[0]));
                            List<ReportDefinition> sameOrgGroupList = ReportDefinition
                                    .findBySameOrganizationAndGroup(rdList, report.getReportDefinition());
                            if (sameOrgGroupList.size() > 1) {
                                List<ReportDefinition> rdNotificationList = ReportDefinition
                                        .findByReportType(sameOrgGroupList, ReportType.NOTIFICATION);
                                for (ReportDefinition rd : rdNotificationList) {
                                    // we must remove these from suggestions.
                                    nameList.remove(rd.getName());
                                    boolean removed = defList.remove(rd);
                                    evaluationResult.removeReportDefinitionName(aeReportId, adverseEvent,
                                            rd.getName());
                                    evaluationResult.addProcessingStep(aeReportId,
                                            "caAERS : Adverse event ("
                                                    + AdverseEvent.toReadableString(adverseEvent)
                                                    + ") is already reported in :",
                                            "" + report.getId());
                                    evaluationResult.addProcessingStep(aeReportId,
                                            " Notifications are not needed again, removing:", rd.getName());
                                    evaluationResult.addProcessingStep(aeReportId, " removed ? :",
                                            String.valueOf(removed));
                                }

                            }
                        }
                    }
                } else {
                    //throw away rules suggestion - if AE is not modified and is part of a submitted report OR if AE is new

                    for (Report report : completedReports) {
                        if (report.isReported(adverseEvent)) {
                            nameList.remove(report.getName());
                            List<ReportDefinition> rdList = ReportDefinition.findByName(defList,
                                    new String[] { report.getName() });
                            if (!rdList.isEmpty())
                                defList.remove(rdList.get(0));
                            evaluationResult.removeReportDefinitionName(aeReportId, adverseEvent,
                                    report.getName());
                            evaluationResult.addProcessingStep(aeReportId, "caAERS : Adverse event ("
                                    + AdverseEvent.toReadableString(adverseEvent) + "):", null);
                            evaluationResult.addProcessingStep(aeReportId,
                                    " Unmodified and belongs to completed report :", null);
                            evaluationResult.addProcessingStep(aeReportId, " Removing suggestion :",
                                    report.getName());

                        }
                    }

                }

            }
        }

        //Update AE reporting flag (or sae flag)
        for (AdverseEvent ae : map.keySet()) {
            List<String> nameList = map.get(ae);
            ae.setRequiresReporting(!nameList.isEmpty());
            evaluationResult.addProcessingStep(aeReportId,
                    "caAERS: Adverse event (" + AdverseEvent.toReadableString(ae) + ") may need reporting ? : ",
                    String.valueOf(ae.getRequiresReporting()));
        }

        //logging
        if (log.isDebugEnabled()) {
            log.debug("Rules Engine Result for : " + aeReportId + ", " + String.valueOf(map));
        }

        // - If a child report is active, select that instead of the parent.
        // - If there is a manual selection, ignore rules engine suggestions from the same group.
        // - A manual selection is always preferred (i.e. by default, add active manually selected reports).
        // - If an AE that is part of a completed report was modified, force an amendment.
        List<Report> activeReports = null;
        if (expeditedData != null) {
            activeReports = expeditedData.getActiveReports();
            List<Report> manuallySelectedReports = expeditedData.getManuallySelectedReports();

            //a temporary list
            List<ReportDefinition> tmplist = new ArrayList<ReportDefinition>(defList);

            //keep active child report instead of parent.
            for (Report activeReport : activeReports) {
                ReportDefinition rdParent = activeReport.getReportDefinition().getParent();
                ReportDefinition rdFound = findReportDefinition(tmplist, rdParent);

                if (rdFound != null) {
                    //remove parent and keep child
                    defList.remove(rdFound);
                    defList.add(activeReport.getReportDefinition());
                    evaluationResult.replaceReportDefinitionName(aeReportId, rdFound.getName(),
                            activeReport.getName());
                    evaluationResult.addProcessingStep(aeReportId,
                            "caAERS: Active child report (" + activeReport.getName() + ") present", null);
                    evaluationResult.addProcessingStep(aeReportId, " Removing suggestion", rdFound.getName());
                }
            }

            //throw away all suggestions of rules engine, (if they belong to the same group as that of manually selected)
            for (Report manualReport : manuallySelectedReports) {
                ReportDefinition rdManual = manualReport.getReportDefinition();

                for (ReportDefinition rdSuggested : tmplist) {
                    if (rdSuggested.isOfSameReportTypeAndOrganization(rdManual) && manualReport.isActive()) {
                        //remove it from rules engine suggestions
                        defList.remove(rdSuggested);
                        evaluationResult.replaceReportDefinitionName(aeReportId, rdSuggested.getName(),
                                rdManual.getName());
                        evaluationResult.addProcessingStep(aeReportId,
                                "caAERS: Manually selected report (" + rdManual.getName() + ") present", null);
                        evaluationResult.addProcessingStep(aeReportId, " Removing suggestion",
                                rdSuggested.getName());
                    }
                }

                //now add the manually selected report.
                defList.add(rdManual);
                evaluationResult.addReportDefinitionName(aeReportId, rdManual.getName());
                evaluationResult.addProcessingStep(aeReportId, " Adding to suggestion ", rdManual.getName());

            }

            //any ae modified/got completed reports ? add those report definitions.
            if (defList.isEmpty() && !modifiedAeList.isEmpty()) {
                //Any completed report, suggest amending it to proceed (but no alert).
                for (Report report : completedReports) {

                    ReportDefinition rdCompleted = report.getReportDefinition();

                    if (!rdCompleted.getAmendable())
                        continue;

                    defList.add(rdCompleted);
                    for (AdverseEvent ae : modifiedAeList) {
                        evaluationResult.addReportDefinitionName(aeReportId, ae, rdCompleted.getName());
                        evaluationResult.addProcessingStep(aeReportId, "caAERS: Submitted adverse event ("
                                + AdverseEvent.toReadableString(ae) + ") is modified : ", null);
                        evaluationResult.addProcessingStep(aeReportId, " Adding to suggestion ",
                                rdCompleted.getName());

                    }

                }
            }

            //CAAERS-7067 - the deletions must suggest an Amend (ONLY if the AE was reported on last submitted report)
            if (!deletedAeList.isEmpty()) {
                // find latest submission from each group and org
                List<Report> lastSubmittedReports = new ArrayList<Report>();
                Set<Integer> rdIdSet = new HashSet<Integer>(); // track IDs; a Set of reports could complicate equals() with Hibernate proxies
                for (Report completedReport : completedReports) {
                    Report latestReport = aeReport
                            .findLastSubmittedReport(completedReport.getReportDefinition());
                    if (rdIdSet.add(latestReport.getReportDefinition().getId())) {
                        lastSubmittedReports.add(latestReport);
                    }
                }

                // For each such report, if the deleted AE was submitted on it, suggest an amendment.
                for (Report submittedReport : lastSubmittedReports) {
                    ReportDefinition rdCompleted = submittedReport.getReportDefinition();
                    if (rdCompleted.getReportType() == ReportType.NOTIFICATION)
                        continue; //CAAERS-7041
                    if (!rdCompleted.getAmendable())
                        continue;

                    for (AdverseEvent ae : deletedAeList) {
                        boolean reported = submittedReport.isReported(ae);
                        if (reported) {
                            defList.add(rdCompleted);
                            evaluationResult.addReportDefinitionName(aeReportId, ae, rdCompleted.getName());
                            evaluationResult.addProcessingStep(aeReportId, "caAERS: Submitted adverse event ("
                                    + AdverseEvent.toReadableString(ae) + ") is deleted : ", null);
                            evaluationResult.addProcessingStep(aeReportId, " Adding to suggestion ",
                                    rdCompleted.getName());
                        }
                    }
                }
            }

        }

        //logging 
        if (log.isDebugEnabled()) {
            log.debug("Report Definitions before filtering for aeReportId: " + aeReportId + ", "
                    + String.valueOf(defList));
        }

        //filter the report definitions
        List<ReportDefinition> reportDefinitions = reportDefinitionFilter.filter(defList);

        if (reportDefinitions != null) {
            List<String> filteredReportDefinitionNames = new ArrayList<String>();
            for (ReportDefinition rd : reportDefinitions) {
                filteredReportDefinitionNames.add(rd.getName());
            }
            evaluationResult.addProcessingStep(aeReportId, " ", null);
            evaluationResult.addProcessingStep(aeReportId, "caAERS: Final suggestion after filtering :",
                    filteredReportDefinitionNames.toString());
        }

        // update the alert-needed flag based on the final set of report definitions
        if (expeditedData == null) {
            alertNeeded = !reportDefinitions.isEmpty();
        } else {
            for (ReportDefinition reportDefinition : reportDefinitions) {
                alertNeeded |= expeditedData.findReportsToEdit(reportDefinition).isEmpty();
            }
        }
        evaluationResult.getAeReportAlertMap().put(aeReportId, alertNeeded);
        evaluationResult.addProcessingStep(aeReportId, "caAERS: Alert is needed ? ",
                String.valueOf(alertNeeded));

        //logging 
        if (log.isDebugEnabled()) {
            log.debug("Report Definitions after filtering for aeReportId: " + aeReportId + ", "
                    + String.valueOf(reportDefinitions));
        }

        // now go through each report definition and populate the create/edit/amend/withdraw maps
        Set<ReportDefinitionWrapper> rdCreateSet = new HashSet<ReportDefinitionWrapper>();
        Set<ReportDefinitionWrapper> rdEditSet = new HashSet<ReportDefinitionWrapper>();
        Set<ReportDefinitionWrapper> rdWithdrawSet = new HashSet<ReportDefinitionWrapper>();
        Set<ReportDefinitionWrapper> rdAmmendSet = new HashSet<ReportDefinitionWrapper>();

        ReportDefinitionWrapper wrapper;
        for (ReportDefinition rd : reportDefinitions) {

            if (expeditedData == null) {
                // all report definitions should go in the createMap
                wrapper = new ReportDefinitionWrapper(rd, null, ActionType.CREATE);
                wrapper.setStatus("Not started");
                rdCreateSet.add(wrapper);
            } else {

                //find reports getting amended
                List<Report> reportsAmmended = expeditedData.findReportsToAmmend(rd);
                for (Report report : reportsAmmended) {
                    wrapper = new ReportDefinitionWrapper(report.getReportDefinition(), rd, ActionType.AMEND);
                    wrapper.setStatus(report.getLastVersion().getStatusAsString());
                    wrapper.setSubmittedOn(report.getSubmittedOn());
                    rdAmmendSet.add(wrapper);
                }

                //find reports getting withdrawn
                List<Report> reportsWithdrawn = expeditedData.findReportsToWithdraw(rd);
                for (Report report : reportsWithdrawn) {
                    wrapper = new ReportDefinitionWrapper(report.getReportDefinition(), rd,
                            ActionType.WITHDRAW);
                    wrapper.setStatus("In process");
                    wrapper.setDueOn(report.getDueOn());
                    rdWithdrawSet.add(wrapper);
                }

                //find the reports getting edited
                List<Report> reportsEdited = expeditedData.findReportsToEdit(rd);
                for (Report report : reportsEdited) {
                    wrapper = new ReportDefinitionWrapper(report.getReportDefinition(), rd, ActionType.EDIT);
                    wrapper.setStatus("In process");
                    wrapper.setDueOn(report.getDueOn());
                    rdEditSet.add(wrapper);
                }

                // nothing being edited, amended, or withdrawn; add this report def to the create list
                if (reportsEdited.isEmpty() && reportsAmmended.isEmpty() && reportsWithdrawn.isEmpty()) {
                    wrapper = new ReportDefinitionWrapper(rd, null, ActionType.CREATE);
                    wrapper.setStatus("Not started");
                    rdCreateSet.add(wrapper);
                }

            } //if expeditedData  

        } //for rd

        //Check if there is a need to withdraw any active report. 
        if (expeditedData != null && activeReports != null) {
            for (Report report : activeReports) {
                ReportDefinition rdActive = report.getReportDefinition();
                if (report.isManuallySelected())
                    continue;
                boolean toBeWithdrawn = true;
                for (ReportDefinitionWrapper editWrapper : rdEditSet) {
                    if (editWrapper.getDef().equals(rdActive)) {
                        toBeWithdrawn = false;
                        break;
                    }
                }

                if (toBeWithdrawn) {
                    for (ReportDefinitionWrapper withdrawWrapper : rdWithdrawSet) {
                        if (withdrawWrapper.getDef().equals(rdActive)) {
                            toBeWithdrawn = false;
                            break;
                        }
                    }
                }

                if (toBeWithdrawn) {
                    wrapper = new ReportDefinitionWrapper(rdActive, null, ActionType.WITHDRAW);
                    wrapper.setDueOn(report.getDueOn());
                    wrapper.setStatus("In process");
                    rdWithdrawSet.add(wrapper);
                }
            }
        }

        //add everything to the result.
        evaluationResult.getCreateMap().put(aeReportId, rdCreateSet);
        evaluationResult.getAmendmentMap().put(aeReportId, rdAmmendSet);
        evaluationResult.getEditMap().put(aeReportId, rdEditSet);
        evaluationResult.getWithdrawalMap().put(aeReportId, rdWithdrawSet);

        if (!rdCreateSet.isEmpty()) {
            evaluationResult.addProcessingStep(aeReportId, "caAERS: Create options :", null);
            for (ReportDefinitionWrapper rdWrapper : rdCreateSet) {
                evaluationResult.addProcessingStep(aeReportId, " " + rdWrapper.getReadableMessage(), null);
            }
        }

        if (!rdAmmendSet.isEmpty()) {
            evaluationResult.addProcessingStep(aeReportId, "caAERS: Amend options :", null);
            for (ReportDefinitionWrapper rdWrapper : rdAmmendSet) {
                evaluationResult.addProcessingStep(aeReportId, " " + rdWrapper.getReadableMessage(), null);
            }
        }

        if (!rdEditSet.isEmpty()) {
            evaluationResult.addProcessingStep(aeReportId, "caAERS: Edit options :", null);
            for (ReportDefinitionWrapper rdWrapper : rdEditSet) {
                evaluationResult.addProcessingStep(aeReportId, " " + rdWrapper.getReadableMessage(), null);
            }
        }

        if (!rdWithdrawSet.isEmpty()) {
            evaluationResult.addProcessingStep(aeReportId, "caAERS: Withdraw options :", null);
            for (ReportDefinitionWrapper rdWrapper : rdWithdrawSet) {
                evaluationResult.addProcessingStep(aeReportId, " " + rdWrapper.getReadableMessage(), null);
            }
        }

        //update the result object
        evaluationResult.addEvaluatedAdverseEvents(aeReportId, evaluatableAeList);
        //           evaluationResult.addResult(aeList, reportDefinitions);
        evaluationResult.addResult(expeditedData, reportDefinitions);

    } catch (Exception e) {
        throw new CaaersSystemException(
                "Could not determine the reports necessary for the given expedited adverse event data", e);
    }

}
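
Two clear() calls drive the flow above: adverseEventRecommendedReportsMap.clear() resets the working map before repopulating it, and rdNameSet.clear() (the CAAERS-5702 block) discards every accumulated suggestion once the sentinel value "IGNORE" is seen. A minimal sketch of that sentinel-reset idiom, with illustrative names rather than caAERS APIs:

import java.util.HashSet;
import java.util.Set;

public class SentinelReset {
    // A single "IGNORE" entry overrides and discards all other suggestions.
    static Set<String> applySentinel(Set<String> suggestions) {
        if (suggestions.contains("IGNORE")) {
            suggestions.clear();
        }
        return suggestions;
    }
}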

From source file:io.warp10.continuum.egress.EgressFetchHandler.java

@Override
public void handle(String target, Request baseRequest, HttpServletRequest req, HttpServletResponse resp)
        throws IOException, ServletException {
    boolean fromArchive = false;
    boolean splitFetch = false;
    boolean writeTimestamp = false;

    if (Constants.API_ENDPOINT_FETCH.equals(target)) {
        baseRequest.setHandled(true);
        fromArchive = false;
    } else if (Constants.API_ENDPOINT_AFETCH.equals(target)) {
        baseRequest.setHandled(true);
        fromArchive = true;
    } else if (Constants.API_ENDPOINT_SFETCH.equals(target)) {
        baseRequest.setHandled(true);
        splitFetch = true;
    } else if (Constants.API_ENDPOINT_CHECK.equals(target)) {
        baseRequest.setHandled(true);
        resp.setStatus(HttpServletResponse.SC_OK);
        return;
    } else {
        return;
    }

    try {
        // Labels for Sensision
        Map<String, String> labels = new HashMap<String, String>();

        labels.put(SensisionConstants.SENSISION_LABEL_TYPE, target);

        //
        // Add CORS header
        //

        resp.setHeader("Access-Control-Allow-Origin", "*");

        String start = null;
        String stop = null;

        long now = Long.MIN_VALUE;
        long timespan = 0L;

        String nowParam = null;
        String timespanParam = null;
        String dedupParam = null;
        String showErrorsParam = null;

        if (splitFetch) {
            nowParam = req.getHeader(Constants.getHeader(Configuration.HTTP_HEADER_NOW_HEADERX));
            timespanParam = req.getHeader(Constants.getHeader(Configuration.HTTP_HEADER_TIMESPAN_HEADERX));
            showErrorsParam = req.getHeader(Constants.getHeader(Configuration.HTTP_HEADER_SHOW_ERRORS_HEADERX));
        } else {
            start = req.getParameter(Constants.HTTP_PARAM_START);
            stop = req.getParameter(Constants.HTTP_PARAM_STOP);

            nowParam = req.getParameter(Constants.HTTP_PARAM_NOW);
            timespanParam = req.getParameter(Constants.HTTP_PARAM_TIMESPAN);
            dedupParam = req.getParameter(Constants.HTTP_PARAM_DEDUP);
            showErrorsParam = req.getParameter(Constants.HTTP_PARAM_SHOW_ERRORS);
        }

        String maxDecoderLenParam = req.getParameter(Constants.HTTP_PARAM_MAXSIZE);
        int maxDecoderLen = null != maxDecoderLenParam ? Integer.parseInt(maxDecoderLenParam)
                : Constants.DEFAULT_PACKED_MAXSIZE;

        String suffix = req.getParameter(Constants.HTTP_PARAM_SUFFIX);
        if (null == suffix) {
            suffix = Constants.DEFAULT_PACKED_CLASS_SUFFIX;
        }

        boolean unpack = null != req.getParameter(Constants.HTTP_PARAM_UNPACK);

        long chunksize = Long.MAX_VALUE;

        if (null != req.getParameter(Constants.HTTP_PARAM_CHUNKSIZE)) {
            chunksize = Long.parseLong(req.getParameter(Constants.HTTP_PARAM_CHUNKSIZE));
        }

        if (chunksize <= 0) {
            throw new IOException("Invalid chunksize.");
        }

        boolean showErrors = null != showErrorsParam;
        boolean dedup = null != dedupParam && "true".equals(dedupParam);

        if (null != start && null != stop) {
            long tsstart = fmt.parseDateTime(start).getMillis() * Constants.TIME_UNITS_PER_MS;
            long tsstop = fmt.parseDateTime(stop).getMillis() * Constants.TIME_UNITS_PER_MS;

            if (tsstart < tsstop) {
                now = tsstop;
                timespan = tsstop - tsstart;
            } else {
                now = tsstart;
                timespan = tsstart - tsstop;
            }
        } else if (null != nowParam && null != timespanParam) {
            if ("now".equals(nowParam)) {
                now = TimeSource.getTime();
            } else {
                try {
                    now = Long.parseLong(nowParam);
                } catch (Exception e) {
                    now = fmt.parseDateTime(nowParam).getMillis() * Constants.TIME_UNITS_PER_MS;
                }
            }

            timespan = Long.parseLong(timespanParam);
        }

        if (Long.MIN_VALUE == now) {
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST,
                    "Missing now/timespan or start/stop parameters.");
            return;
        }

        String selector = splitFetch ? null : req.getParameter(Constants.HTTP_PARAM_SELECTOR);

        //
        // Extract token from header
        //

        String token = req.getHeader(Constants.getHeader(Configuration.HTTP_HEADER_TOKENX));

        // If token was not found in header, extract it from the 'token' parameter
        if (null == token && !splitFetch) {
            token = req.getParameter(Constants.HTTP_PARAM_TOKEN);
        }

        String fetchSig = req.getHeader(Constants.getHeader(Configuration.HTTP_HEADER_FETCH_SIGNATURE));

        //
        // Check token signature if it was provided
        //

        boolean signed = false;

        if (splitFetch) {
            // Force showErrors
            showErrors = true;
            signed = true;
        }

        if (null != fetchSig) {
            if (null != fetchPSK) {
                String[] subelts = fetchSig.split(":");
                if (2 != subelts.length) {
                    throw new IOException("Invalid fetch signature.");
                }
                long nowts = System.currentTimeMillis();
                long sigts = new BigInteger(subelts[0], 16).longValue();
                long sighash = new BigInteger(subelts[1], 16).longValue();

                if (nowts - sigts > 10000L) {
                    throw new IOException("Fetch signature has expired.");
                }

                // Recompute hash of ts:token

                String tstoken = Long.toString(sigts) + ":" + token;

                long checkedhash = SipHashInline.hash24(fetchPSK, tstoken.getBytes(Charsets.ISO_8859_1));

                if (checkedhash != sighash) {
                    throw new IOException("Corrupted fetch signature");
                }

                signed = true;
            } else {
                throw new IOException("Fetch PreSharedKey is not set.");
            }
        }

        ReadToken rtoken = null;

        String format = splitFetch ? "wrapper" : req.getParameter(Constants.HTTP_PARAM_FORMAT);

        if (!splitFetch) {
            try {
                rtoken = Tokens.extractReadToken(token);

                if (rtoken.getHooksSize() > 0) {
                    throw new IOException("Tokens with hooks cannot be used for fetching data.");
                }
            } catch (WarpScriptException ee) {
                throw new IOException(ee);
            }

            if (null == rtoken) {
                resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Missing token.");
                return;
            }
        }

        boolean showAttr = "true".equals(req.getParameter(Constants.HTTP_PARAM_SHOWATTR));

        boolean sortMeta = "true".equals(req.getParameter(Constants.HTTP_PARAM_SORTMETA));

        //
        // Extract the class and labels selectors.
        // The class selector and label selectors are expected to be
        // percent-encoded, and may have been percent-encoded again
        // when passed as request parameters.
        //

        Set<Metadata> metadatas = new HashSet<Metadata>();
        List<Iterator<Metadata>> iterators = new ArrayList<Iterator<Metadata>>();

        if (!splitFetch) {

            if (null == selector) {
                throw new IOException("Missing '" + Constants.HTTP_PARAM_SELECTOR + "' parameter.");
            }

            String[] selectors = selector.split("\\s+");

            for (String sel : selectors) {
                Matcher m = SELECTOR_RE.matcher(sel);

                if (!m.matches()) {
                    resp.sendError(HttpServletResponse.SC_BAD_REQUEST);
                    return;
                }

                String classSelector = URLDecoder.decode(m.group(1), "UTF-8");
                String labelsSelection = m.group(2);

                Map<String, String> labelsSelectors;

                try {
                    labelsSelectors = GTSHelper.parseLabelsSelectors(labelsSelection);
                } catch (ParseException pe) {
                    throw new IOException(pe);
                }

                //
                // Force 'producer'/'owner'/'app' from token
                //

                labelsSelectors.remove(Constants.PRODUCER_LABEL);
                labelsSelectors.remove(Constants.OWNER_LABEL);
                labelsSelectors.remove(Constants.APPLICATION_LABEL);

                labelsSelectors.putAll(Tokens.labelSelectorsFromReadToken(rtoken));

                List<Metadata> metas = null;

                List<String> clsSels = new ArrayList<String>();
                List<Map<String, String>> lblsSels = new ArrayList<Map<String, String>>();

                clsSels.add(classSelector);
                lblsSels.add(labelsSelectors);

                try {
                    metas = directoryClient.find(clsSels, lblsSels);
                    metadatas.addAll(metas);
                } catch (Exception e) {
                    //
                    // If metadatas is not empty, create an iterator for it, then clear it
                    //
                    if (!metadatas.isEmpty()) {
                        iterators.add(metadatas.iterator());
                        metadatas.clear();
                    }
                    iterators.add(directoryClient.iterator(clsSels, lblsSels));
                }
            }
        } else {
            //
            // Add an iterator which reads splits from the request body
            //

            boolean gzipped = false;

            if (null != req.getHeader("Content-Type")
                    && "application/gzip".equals(req.getHeader("Content-Type"))) {
                gzipped = true;
            }

            BufferedReader br = null;

            if (gzipped) {
                GZIPInputStream is = new GZIPInputStream(req.getInputStream());
                br = new BufferedReader(new InputStreamReader(is));
            } else {
                br = req.getReader();
            }

            final BufferedReader fbr = br;

            MetadataIterator iterator = new MetadataIterator() {

                private List<Metadata> metadatas = new ArrayList<Metadata>();

                private boolean done = false;

                private String lasttoken = "";

                @Override
                public void close() throws Exception {
                    fbr.close();
                }

                @Override
                public Metadata next() {
                    if (!metadatas.isEmpty()) {
                        Metadata meta = metadatas.get(metadatas.size() - 1);
                        metadatas.remove(metadatas.size() - 1);
                        return meta;
                    } else {
                        if (hasNext()) {
                            return next();
                        } else {
                            throw new NoSuchElementException();
                        }
                    }
                }

                @Override
                public boolean hasNext() {
                    if (!metadatas.isEmpty()) {
                        return true;
                    }

                    if (done) {
                        return false;
                    }

                    String line = null;

                    try {
                        line = fbr.readLine();
                    } catch (IOException ioe) {
                        throw new RuntimeException(ioe);
                    }

                    if (null == line) {
                        done = true;
                        return false;
                    }

                    //
                    // Decode/Unwrap/Deserialize the split
                    //

                    byte[] data = OrderPreservingBase64.decode(line.getBytes(Charsets.US_ASCII));
                    if (null != fetchAES) {
                        data = CryptoUtils.unwrap(fetchAES, data);
                    }

                    if (null == data) {
                        throw new RuntimeException("Invalid wrapped content.");
                    }

                    TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

                    GTSSplit split = new GTSSplit();

                    try {
                        deserializer.deserialize(split, data);
                    } catch (TException te) {
                        throw new RuntimeException(te);
                    }

                    //
                    // Check the expiry
                    //

                    long instant = System.currentTimeMillis();

                    if (instant - split.getTimestamp() > maxSplitAge || instant > split.getExpiry()) {
                        throw new RuntimeException("Split has expired.");
                    }

                    this.metadatas.addAll(split.getMetadatas());

                    // We assume the split contained at least one metadata instance.
                    return true;
                }
            };

            iterators.add(iterator);
        }

        List<Metadata> metas = new ArrayList<Metadata>();
        metas.addAll(metadatas);

        if (!metas.isEmpty()) {
            iterators.add(metas.iterator());
        }

        //
        // Loop over the iterators, storing the read metadata in a temporary file encrypted on disk.
        // Data is encrypted using a one-time pad.
        //

        final byte[] onetimepad = new byte[(int) Math.min(65537, System.currentTimeMillis() % 100000)];
        new Random().nextBytes(onetimepad);

        final File cache = File.createTempFile(
                Long.toHexString(System.currentTimeMillis()) + "-" + Long.toHexString(System.nanoTime()),
                ".dircache");
        cache.deleteOnExit();

        FileWriter writer = new FileWriter(cache);

        TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());

        int padidx = 0;

        for (Iterator<Metadata> itermeta : iterators) {
            try {
                while (itermeta.hasNext()) {
                    Metadata metadata = itermeta.next();

                    try {
                        byte[] bytes = serializer.serialize(metadata);
                        // Apply onetimepad
                        for (int i = 0; i < bytes.length; i++) {
                            bytes[i] = (byte) (bytes[i] ^ onetimepad[padidx++]);
                            if (padidx >= onetimepad.length) {
                                padidx = 0;
                            }
                        }
                        OrderPreservingBase64.encodeToWriter(bytes, writer);
                        writer.write('\n');
                    } catch (TException te) {
                        // skip metadata entries that fail to serialize
                    }
                }

                if (!itermeta.hasNext() && (itermeta instanceof MetadataIterator)) {
                    try {
                        ((MetadataIterator) itermeta).close();
                    } catch (Exception e) {
                    }
                }
            } catch (Throwable t) {
                throw t;
            } finally {
                if (itermeta instanceof MetadataIterator) {
                    try {
                        ((MetadataIterator) itermeta).close();
                    } catch (Exception e) {
                    }
                }
            }
        }

        writer.close();

        //
        // Create an iterator based on the cache
        //

        MetadataIterator cacheiterator = new MetadataIterator() {

            BufferedReader reader = new BufferedReader(new FileReader(cache));

            private Metadata current = null;
            private boolean done = false;

            private TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

            int padidx = 0;

            @Override
            public boolean hasNext() {
                if (done) {
                    return false;
                }

                if (null != current) {
                    return true;
                }

                try {
                    String line = reader.readLine();
                    if (null == line) {
                        done = true;
                        return false;
                    }
                    byte[] raw = OrderPreservingBase64.decode(line.getBytes(Charsets.US_ASCII));
                    // Apply one time pad
                    for (int i = 0; i < raw.length; i++) {
                        raw[i] = (byte) (raw[i] ^ onetimepad[padidx++]);
                        if (padidx >= onetimepad.length) {
                            padidx = 0;
                        }
                    }
                    Metadata metadata = new Metadata();
                    try {
                        deserializer.deserialize(metadata, raw);
                        this.current = metadata;
                        return true;
                    } catch (TException te) {
                        LOG.error("", te);
                    }
                } catch (IOException ioe) {
                    LOG.error("", ioe);
                }

                return false;
            }

            @Override
            public Metadata next() {
                if (null != this.current) {
                    Metadata metadata = this.current;
                    this.current = null;
                    return metadata;
                } else {
                    throw new NoSuchElementException();
                }
            }

            @Override
            public void close() throws Exception {
                this.reader.close();
                cache.delete();
            }
        };

        iterators.clear();
        iterators.add(cacheiterator);

        metas = new ArrayList<Metadata>();

        PrintWriter pw = resp.getWriter();

        AtomicReference<Metadata> lastMeta = new AtomicReference<Metadata>(null);
        AtomicLong lastCount = new AtomicLong(0L);

        long fetchtimespan = timespan;

        for (Iterator<Metadata> itermeta : iterators) {
            while (itermeta.hasNext()) {
                metas.add(itermeta.next());

                //
                // Access the data store every 'FETCH_BATCHSIZE' GTS or at the end of each iterator
                //

                if (metas.size() > FETCH_BATCHSIZE || !itermeta.hasNext()) {
                    try (GTSDecoderIterator iterrsc = storeClient.fetch(rtoken, metas, now, fetchtimespan,
                            fromArchive, writeTimestamp)) {
                        GTSDecoderIterator iter = iterrsc;

                        if (unpack) {
                            iter = new UnpackingGTSDecoderIterator(iter, suffix);
                            timespan = Long.MIN_VALUE + 1;
                        }

                        if ("text".equals(format)) {
                            textDump(pw, iter, now, timespan, false, dedup, signed, showAttr, lastMeta,
                                    lastCount, sortMeta);
                        } else if ("fulltext".equals(format)) {
                            textDump(pw, iter, now, timespan, true, dedup, signed, showAttr, lastMeta,
                                    lastCount, sortMeta);
                        } else if ("raw".equals(format)) {
                            rawDump(pw, iter, dedup, signed, timespan, lastMeta, lastCount, sortMeta);
                        } else if ("wrapper".equals(format)) {
                            wrapperDump(pw, iter, dedup, signed, fetchPSK, timespan, lastMeta, lastCount);
                        } else if ("json".equals(format)) {
                            jsonDump(pw, iter, now, timespan, dedup, signed, lastMeta, lastCount);
                        } else if ("tsv".equals(format)) {
                            tsvDump(pw, iter, now, timespan, false, dedup, signed, lastMeta, lastCount,
                                    sortMeta);
                        } else if ("fulltsv".equals(format)) {
                            tsvDump(pw, iter, now, timespan, true, dedup, signed, lastMeta, lastCount,
                                    sortMeta);
                        } else if ("pack".equals(format)) {
                            packedDump(pw, iter, now, timespan, dedup, signed, lastMeta, lastCount,
                                    maxDecoderLen, suffix, chunksize, sortMeta);
                        } else if ("null".equals(format)) {
                            nullDump(iter);
                        } else {
                            textDump(pw, iter, now, timespan, false, dedup, signed, showAttr, lastMeta,
                                    lastCount, sortMeta);
                        }
                    } catch (Throwable t) {
                        LOG.error("", t);
                        Sensision.update(SensisionConstants.CLASS_WARP_FETCH_ERRORS, Sensision.EMPTY_LABELS, 1);
                        if (showErrors) {
                            pw.println();
                            StringWriter sw = new StringWriter();
                            PrintWriter pw2 = new PrintWriter(sw);
                            t.printStackTrace(pw2);
                            pw2.close();
                            sw.flush();
                            String error = URLEncoder.encode(sw.toString(), "UTF-8");
                            pw.println(Constants.EGRESS_FETCH_ERROR_PREFIX + error);
                        }
                        throw new IOException(t);
                    } finally {
                        if (!itermeta.hasNext() && (itermeta instanceof MetadataIterator)) {
                            try {
                                ((MetadataIterator) itermeta).close();
                            } catch (Exception e) {
                            }
                        }
                    }

                    //
                    // Reset 'metas'
                    //

                    metas.clear();
                }
            }

            if (!itermeta.hasNext() && (itermeta instanceof MetadataIterator)) {
                try {
                    ((MetadataIterator) itermeta).close();
                } catch (Exception e) {
                }
            }
        }

        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_FETCH_REQUESTS, labels, 1);
    } catch (Exception e) {
        if (!resp.isCommitted()) {
            resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e.getMessage());
            return;
        }
    }
}
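
The handler above clears and reuses collections rather than reallocating them: metadatas.clear() empties the Set after handing its contents to an iterator, and the same buffer-reuse idiom appears with metas.clear() at the end of every store fetch batch. A minimal sketch of the batch-and-clear idiom; the batch size and the println standing in for the store call are placeholders:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class BatchClear {
    static final int BATCH_SIZE = 3; // placeholder for FETCH_BATCHSIZE

    static void process(List<String> items) {
        Set<String> batch = new HashSet<String>();
        for (int i = 0; i < items.size(); i++) {
            batch.add(items.get(i));
            boolean last = (i == items.size() - 1);
            if (batch.size() >= BATCH_SIZE || last) {
                System.out.println("flushing " + batch); // stand-in for the store fetch
                batch.clear(); // reuse the same set for the next batch
            }
        }
    }
}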

From source file:com.bdaum.zoom.gps.internal.operations.GeotagOperation.java

private IStatus execute(IProgressMonitor aMonitor, IAdaptable info, boolean redo) {
    if (gpsConfiguration == null)
        return Status.CANCEL_STATUS;
    resumeLater = false;
    tagged = 0;
    int work = 0;
    int f = 1;
    if (trackpoints.length > 0)
        work = f = 2;
    int l = assetIds.length;
    work += f * l;
    backups = new Backup[l];
    init(aMonitor, work);
    Meta meta = dbManager.getMeta(true);
    Set<String> postponed = meta.getPostponedNaming();
    processed.clear();
    if (trackpoints.length > 0) {
        aMonitor.subTask(removeTag ? Messages.getString("GeotagOperation.removing_tags") //$NON-NLS-1$
                : Messages.getString("GeotagOperation.Tagging")); //$NON-NLS-1$
        List<Asset> assets = new ArrayList<Asset>(assetIds.length);
        int i = 0;
        for (String assetId : assetIds) {
            if (!isPeerOwned(assetId)) {
                AssetImpl asset = dbManager.obtainAsset(assetId);
                if (asset != null) {
                    if (removeTag) {
                        try {
                            backups[i] = new Backup(opId, asset, QueryField.EXIF_GPSLONGITUDE,
                                    QueryField.EXIF_GPSLATITUDE, QueryField.EXIF_GPSIMAGEDIR);
                        } catch (Exception e) {
                            addError(Messages.getString("GeotagOperation.error_creating_backup"), e); //$NON-NLS-1$
                        }
                        // Clear the same field covered by the backup (latitude, not destination latitude)
                        asset.setGPSLatitude(Double.NaN);
                        asset.setGPSLongitude(Double.NaN);
                        asset.setGPSImgDirection(Double.NaN);
                        List<LocationCreatedImpl> rels = dbManager
                                .obtainStructForAsset(LocationCreatedImpl.class, assetId, true);
                        if (backups[i] != null)
                            backups[i].addAllDeleted(rels);
                        storeSafely(rels.toArray(), 1, asset);
                        ++tagged;
                    } else if (asset.getDateTimeOriginal() != null) {
                        if (aMonitor.isCanceled())
                            return close(info);
                        assets.add(asset);
                        if (tag(asset, meta, i))
                            ++tagged;
                        String gpsImgDirectionRef = asset.getGPSImgDirectionRef();
                        if (Double.isNaN(asset.getGPSImgDirection()) || gpsImgDirectionRef == null
                                || gpsImgDirectionRef.isEmpty())
                            for (LocationShownImpl locationShown : dbManager
                                    .obtainStructForAsset(LocationShownImpl.class, assetId, false)) {
                                LocationImpl loc = dbManager.obtainById(LocationImpl.class,
                                        locationShown.getLocation());
                                if (loc != null) {
                                    asset.setGPSImgDirection(Core.bearing(asset.getGPSLatitude(),
                                            asset.getGPSLongitude(), loc.getLatitude(), loc.getLongitude()));
                                    asset.setGPSImgDirectionRef("T"); //$NON-NLS-1$
                                    break;
                                }
                            }
                    }
                }
            }
            ++i;
        }
        addInfo(removeTag ? Messages.getString("GeotagOperation.0") //$NON-NLS-1$
                : NLS.bind(Messages.getString("GeotagOperation.n_images_tagged"), tagged, l - tagged)); //$NON-NLS-1$
        fireApplyRules(assets, QueryField.EXIF_GPS);
        fireAssetsModified(new BagChange<>(null, assets, null, null), QueryField.EXIF_GPS);
    }
    if (!removeTag && !aMonitor.isCanceled()) {
        aMonitor.subTask(Messages.getString("GeotagOperation.Geonaming_assets")); //$NON-NLS-1$
        int resumed = 0;
        if (postponed == null || postponed.isEmpty())
            assetsTobeNamed = assetIds;
        else {
            resumed = postponed.size();
            assetsTobeNamed = new String[resumed + assetIds.length];
            System.arraycopy(postponed.toArray(new String[resumed]), 0, assetsTobeNamed, 0, resumed);
            System.arraycopy(assetIds, 0, assetsTobeNamed, resumed, assetIds.length);
            postponed.clear();
        }
        try {
            int i = 0;
            List<Asset> assets = new ArrayList<Asset>(assetsTobeNamed.length);
            for (String assetId : assetsTobeNamed)
                if (!isPeerOwned(assetId)) {
                    AssetImpl asset = dbManager.obtainAsset(assetId);
                    if (asset != null) {
                        if (aMonitor.isCanceled())
                            return close(info);
                        geoname(meta, resumed, i, asset, aMonitor, info);
                        assets.add(asset);
                        if (resumeLater)
                            break;
                        ++i;
                    }
                }
            dbManager.storeAndCommit(meta);
            addInfo(NLS.bind(Messages.getString("GeotagOperation.n_images_decorated"), //$NON-NLS-1$
                    named, notnamed));
            if (!redo)
                fireApplyRules(assets, QueryField.IPTC_KEYWORDS);
            fireAssetsModified(redo ? null : new BagChange<>(null, assets, null, null),
                    QueryField.IPTC_LOCATIONCREATED);
        } catch (UnknownHostException e) {
            addError(Messages.getString("GeotagOperation.webservice_not_reached"), //$NON-NLS-1$
                    e);
        } catch (EOFException e) {
            addError(Messages.getString("GeotagOperation.geonaming_aborted"), null); //$NON-NLS-1$
        }
        for (Backup backup : backups)
            if (backup != null)
                dbManager.storeTrash(backup);
        dbManager.commitTrash();
        backups = null;
    }
    return close(info, processed.isEmpty() ? (String[]) null : processed.toArray(new String[processed.size()]));
}
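
Note how the method drains the 'postponed' set: its contents are copied into the assetsTobeNamed array and the set is then emptied with clear() so the same assets are not renamed again on a later run. A hypothetical sketch of this drain-then-clear step (names are illustrative):

import java.util.HashSet;
import java.util.Set;

public class DrainThenClearSketch {
    public static void main(String[] args) {
        Set<String> postponed = new HashSet<String>();
        postponed.add("asset-1");
        postponed.add("asset-2");

        // Copy the pending entries out, then empty the set so they
        // are not picked up again by a later pass.
        String[] toProcess = postponed.toArray(new String[postponed.size()]);
        postponed.clear();

        System.out.println(toProcess.length + " drained, " + postponed.size() + " left");
    }
}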

From source file:org.apache.ambari.server.controller.AmbariManagementControllerImplTest.java

@Test
public void testScheduleSmokeTest() throws Exception {

    final String HOST1 = "host1";
    final String OS_TYPE = "centos5";
    final String STACK_ID = "HDP-2.0.1";
    final String CLUSTER_NAME = "c1";
    final String HDFS_SERVICE_CHECK_ROLE = "HDFS_SERVICE_CHECK";
    final String MAPREDUCE2_SERVICE_CHECK_ROLE = "MAPREDUCE2_SERVICE_CHECK";
    final String YARN_SERVICE_CHECK_ROLE = "YARN_SERVICE_CHECK";

    Map<String, String> mapRequestProps = Collections.<String, String>emptyMap();
    Injector injector = Guice.createInjector(new AbstractModule() {
        @Override
        protected void configure() {
            Properties properties = new Properties();
            properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");

            properties.setProperty(Configuration.METADETA_DIR_PATH, "src/test/resources/stacks");
            properties.setProperty(Configuration.SERVER_VERSION_FILE, "../version");
            properties.setProperty(Configuration.OS_VERSION_KEY, OS_TYPE);
            try {
                install(new ControllerModule(properties));
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    });
    injector.getInstance(GuiceJpaInitializer.class);

    try {
        AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
        Clusters clusters = injector.getInstance(Clusters.class);

        clusters.addHost(HOST1);
        Host host = clusters.getHost(HOST1);
        host.setOsType(OS_TYPE);
        host.persist();

        ClusterRequest clusterRequest = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
        amc.createCluster(clusterRequest);

        Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
        serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, null));
        serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, null));
        serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, null));

        amc.createServices(serviceRequests);

        Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
        serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null, null));
        serviceComponentRequests
                .add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null, null));
        serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null, null));
        serviceComponentRequests
                .add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null, null));
        serviceComponentRequests
                .add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null, null));
        serviceComponentRequests
                .add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null, null));

        amc.createComponents(serviceComponentRequests);

        Set<HostRequest> hostRequests = new HashSet<HostRequest>();
        hostRequests.add(new HostRequest(HOST1, CLUSTER_NAME, null));

        amc.createHosts(hostRequests);

        Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
        componentHostRequests
                .add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "DATANODE", HOST1, null, null));
        componentHostRequests
                .add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NAMENODE", HOST1, null, null));
        componentHostRequests.add(
                new ServiceComponentHostRequest(CLUSTER_NAME, null, "SECONDARY_NAMENODE", HOST1, null, null));
        componentHostRequests
                .add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HISTORYSERVER", HOST1, null, null));
        componentHostRequests
                .add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "RESOURCEMANAGER", HOST1, null, null));
        componentHostRequests
                .add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NODEMANAGER", HOST1, null, null));

        amc.createHostComponents(componentHostRequests);

        //Install services
        serviceRequests.clear();
        serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, State.INSTALLED.name()));
        serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, State.INSTALLED.name()));
        serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, State.INSTALLED.name()));

        amc.updateServices(serviceRequests, mapRequestProps, true, false);

        Cluster cluster = clusters.getCluster(CLUSTER_NAME);

        for (String serviceName : cluster.getServices().keySet()) {

            for (String componentName : cluster.getService(serviceName).getServiceComponents().keySet()) {

                Map<String, ServiceComponentHost> serviceComponentHosts = cluster.getService(serviceName)
                        .getServiceComponent(componentName).getServiceComponentHosts();

                for (Map.Entry<String, ServiceComponentHost> entry : serviceComponentHosts.entrySet()) {
                    ServiceComponentHost cHost = entry.getValue();
                    cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(),
                            cHost.getHostName(), System.currentTimeMillis(), STACK_ID));
                    cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(),
                            cHost.getHostName(), System.currentTimeMillis()));
                }
            }
        }

        //Start services
        serviceRequests.clear();
        serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, State.STARTED.name()));
        serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, State.STARTED.name()));
        serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, State.STARTED.name()));

        RequestStatusResponse response = amc.updateServices(serviceRequests, mapRequestProps, true, false);

        Collection<?> hdfsSmokeTasks = CollectionUtils.select(response.getTasks(),
                new RolePredicate(HDFS_SERVICE_CHECK_ROLE));
        //Ensure that smoke test task was created for HDFS
        assertEquals(1, hdfsSmokeTasks.size());

        Collection<?> mapreduce2SmokeTasks = CollectionUtils.select(response.getTasks(),
                new RolePredicate(MAPREDUCE2_SERVICE_CHECK_ROLE));
        //Ensure that smoke test task was created for MAPREDUCE2
        assertEquals(1, mapreduce2SmokeTasks.size());

        Collection<?> yarnSmokeTasks = CollectionUtils.select(response.getTasks(),
                new RolePredicate(YARN_SERVICE_CHECK_ROLE));
        //Ensure that smoke test task was created for YARN
        assertEquals(1, yarnSmokeTasks.size());
    } finally {
        injector.getInstance(PersistService.class).stop();
    }
}
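
The test reuses one 'serviceRequests' set for three phases (create, install, start), calling clear() between phases instead of building a new set each time. A minimal sketch of that reuse pattern (the request strings and submit method are made up for illustration):

import java.util.HashSet;
import java.util.Set;

public class PhasedRequestsSketch {
    public static void main(String[] args) {
        Set<String> requests = new HashSet<String>();

        requests.add("HDFS:CREATE");
        submit(requests);

        requests.clear(); // reuse the same set for the next phase
        requests.add("HDFS:INSTALL");
        submit(requests);
    }

    private static void submit(Set<String> requests) {
        System.out.println("submitting " + requests);
    }
}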

From source file:com.google.gwt.emultest.java.util.TreeMapTest.java

public void testNavigableKeySet() {
    K[] keys = getSortedKeys();
    V[] values = getSortedValues();
    NavigableMap<K, V> map = createNavigableMap();
    map.put(keys[0], values[0]);

    Set<K> keySet = map.navigableKeySet();
    _assertEquals(keySet, map.navigableKeySet());

    map.put(keys[1], values[1]);
    map.put(keys[2], values[2]);
    _assertEquals(map.navigableKeySet(), keySet);
    _assertEquals(keySet, keySet);

    try {
        keySet.add(keys[3]);
        fail("should throw UnsupportedOperationException");
    } catch (UnsupportedOperationException expected) {
    }
    try {
        keySet.add(null);
        fail("should throw UnsupportedOperationException");
    } catch (UnsupportedOperationException expected) {
    }
    try {
        keySet.addAll(null);
        fail("should throw NullPointerException");
    } catch (NullPointerException expected) {
    }
    Collection<K> collection = new ArrayList<K>();
    keySet.addAll(collection);
    try {
        collection.add(keys[3]);
        keySet.addAll(collection);
        fail("should throw UnsupportedOperationException");
    } catch (UnsupportedOperationException expected) {
    }

    Iterator<K> iter = keySet.iterator();
    iter.next();
    iter.remove();
    assertFalse(map.containsKey(keys[0]));

    collection = new ArrayList<K>();
    collection.add(keys[2]);
    keySet.retainAll(collection);
    assertEquals(1, map.size());
    assertTrue(keySet.contains(keys[2]));

    keySet.removeAll(collection);
    _assertEmpty(map);

    map.put(keys[0], values[0]);
    assertEquals(1, map.size());
    assertTrue(keySet.contains(keys[0]));

    keySet.clear();
    _assertEmpty(map);
}
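
The final assertions rely on navigableKeySet() returning a live view: keySet.clear() writes through to the backing map, which is why _assertEmpty(map) passes. A small sketch of that write-through behavior, assuming a plain TreeMap:

import java.util.Set;
import java.util.TreeMap;

public class KeySetViewClearSketch {
    public static void main(String[] args) {
        TreeMap<String, Integer> map = new TreeMap<String, Integer>();
        map.put("a", 1);
        map.put("b", 2);

        Set<String> keys = map.navigableKeySet();
        keys.clear(); // the view writes through: the backing map is emptied too

        System.out.println(map.isEmpty()); // prints true
    }
}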

From source file:org.alfresco.repo.node.db.DbNodeServiceImpl.java

/**
 * Delete a node
 * 
 * @param nodeRef           the node to delete
 * @param allowArchival     <tt>true</tt> if normal archival may occur or
 *                          <tt>false</tt> if the node must be forcibly deleted
 */
private void deleteNode(NodeRef nodeRef, boolean allowArchival) {
    // The node(s) involved may not be pending deletion
    checkPendingDelete(nodeRef);

    // Pair contains NodeId, NodeRef
    Pair<Long, NodeRef> nodePair = getNodePairNotNull(nodeRef);
    Long nodeId = nodePair.getFirst();

    Boolean requiresDelete = null;

    // get type and aspect QNames as they will be unavailable after the delete
    QName nodeTypeQName = nodeDAO.getNodeType(nodeId);
    Set<QName> nodeAspectQNames = nodeDAO.getNodeAspects(nodeId);

    // Have we been asked to delete a store?
    if (nodeTypeQName.equals(ContentModel.TYPE_STOREROOT)) {
        throw new IllegalArgumentException("A store root node cannot be deleted: " + nodeRef);
    }

    // get the primary parent-child relationship before it is gone
    Pair<Long, ChildAssociationRef> childAssocPair = nodeDAO.getPrimaryParentAssoc(nodeId);
    ChildAssociationRef childAssocRef = childAssocPair.getSecond();

    // Is this store mapped to an archive store?
    StoreRef storeRef = nodeRef.getStoreRef();
    StoreRef archiveStoreRef = storeArchiveMap.get(storeRef);

    // Gather information about the hierarchy
    NodeHierarchyWalker walker = new NodeHierarchyWalker(nodeDAO);
    walker.walkHierarchy(nodePair, childAssocPair);

    // Protect the nodes from being link/unlinked for the remainder of the process
    Set<NodeRef> nodesPendingDelete = new HashSet<NodeRef>(walker.getNodes(false).size());
    for (VisitedNode visitedNode : walker.getNodes(true)) {
        nodesPendingDelete.add(visitedNode.nodeRef);
    }
    Set<NodeRef> nodesPendingDeleteTxn = TransactionalResourceHelper.getSet(KEY_PENDING_DELETE_NODES);
    nodesPendingDeleteTxn.addAll(nodesPendingDelete); // We need to remove these later, again

    // Work out whether we need to archive or delete the node.
    if (!allowArchival) {
        // No archival allowed
        requiresDelete = true;
    } else if (archiveStoreRef == null) {
        // The store does not specify archiving
        requiresDelete = true;
    } else {
        // get the type and check if we need archiving.
        TypeDefinition typeDef = dictionaryService.getType(nodeTypeQName);
        if (typeDef != null) {
            Boolean requiresArchive = typeDef.getArchive();
            if (requiresArchive != null) {
                requiresDelete = !requiresArchive;
            }
        }

        // If the type hasn't asked for deletion, check whether any applied aspects have
        Iterator<QName> i = nodeAspectQNames.iterator();
        while ((requiresDelete == null || !requiresDelete) && i.hasNext()) {
            QName nodeAspectQName = i.next();
            AspectDefinition aspectDef = dictionaryService.getAspect(nodeAspectQName);
            if (aspectDef != null) {
                Boolean requiresArchive = aspectDef.getArchive();
                if (requiresArchive != null) {
                    requiresDelete = !requiresArchive;
                }
            }
        }
    }

    // Propagate timestamps
    propagateTimeStamps(childAssocRef);

    // Archive, if necessary
    boolean archive = requiresDelete != null && !requiresDelete.booleanValue();

    // Fire pre-delete events
    Set<Long> childAssocIds = new HashSet<Long>(23); // Prevents duplicate firing
    Set<Long> peerAssocIds = new HashSet<Long>(23); // Prevents duplicate firing
    List<VisitedNode> nodesToDelete = walker.getNodes(true);
    for (VisitedNode nodeToDelete : nodesToDelete) {
        // Target associations
        for (Pair<Long, AssociationRef> targetAssocPair : nodeToDelete.targetAssocs) {
            if (!peerAssocIds.add(targetAssocPair.getFirst())) {
                continue; // Already fired
            }
            invokeBeforeDeleteAssociation(targetAssocPair.getSecond());
        }
        // Source associations
        for (Pair<Long, AssociationRef> sourceAssocPair : nodeToDelete.sourceAssocs) {
            if (!peerAssocIds.add(sourceAssocPair.getFirst())) {
                continue; // Already fired
            }
            invokeBeforeDeleteAssociation(sourceAssocPair.getSecond());
        }
        // Secondary child associations
        for (Pair<Long, ChildAssociationRef> secondaryChildAssocPair : nodeToDelete.secondaryChildAssocs) {
            if (!childAssocIds.add(secondaryChildAssocPair.getFirst())) {
                continue; // Already fired
            }
            invokeBeforeDeleteChildAssociation(secondaryChildAssocPair.getSecond());
        }
        // Secondary parent associations
        for (Pair<Long, ChildAssociationRef> secondaryParentAssocPair : nodeToDelete.secondaryParentAssocs) {
            if (!childAssocIds.add(secondaryParentAssocPair.getFirst())) {
                continue; // Already fired
            }
            invokeBeforeDeleteChildAssociation(secondaryParentAssocPair.getSecond());
        }

        // Primary child associations
        if (archive) {
            invokeBeforeArchiveNode(nodeToDelete.nodeRef);
        }
        invokeBeforeDeleteNode(nodeToDelete.nodeRef);
    }

    // Archive, if necessary
    if (archive) {
        // Archive node
        archiveHierarchy(walker, archiveStoreRef);
    }

    // Delete/Archive and fire post-delete events incl. updating indexes
    childAssocIds.clear(); // Prevents duplicate firing
    peerAssocIds.clear(); // Prevents duplicate firing
    for (VisitedNode nodeToDelete : nodesToDelete) {
        // Target associations
        for (Pair<Long, AssociationRef> targetAssocPair : nodeToDelete.targetAssocs) {
            if (!peerAssocIds.add(targetAssocPair.getFirst())) {
                continue; // Already fired
            }
            nodeDAO.removeNodeAssocs(Collections.singletonList(targetAssocPair.getFirst()));
            invokeOnDeleteAssociation(targetAssocPair.getSecond());
        }
        // Source associations
        for (Pair<Long, AssociationRef> sourceAssocPair : nodeToDelete.sourceAssocs) {
            if (!peerAssocIds.add(sourceAssocPair.getFirst())) {
                continue; // Already fired
            }
            nodeDAO.removeNodeAssocs(Collections.singletonList(sourceAssocPair.getFirst()));
            invokeOnDeleteAssociation(sourceAssocPair.getSecond());
        }
        // Secondary child associations
        for (Pair<Long, ChildAssociationRef> secondaryChildAssocPair : nodeToDelete.secondaryChildAssocs) {
            if (!childAssocIds.add(secondaryChildAssocPair.getFirst())) {
                continue; // Already fired
            }
            nodeDAO.deleteChildAssoc(secondaryChildAssocPair.getFirst());
            invokeOnDeleteChildAssociation(secondaryChildAssocPair.getSecond());
            nodeIndexer.indexDeleteChildAssociation(secondaryChildAssocPair.getSecond());
        }
        // Secondary parent associations
        for (Pair<Long, ChildAssociationRef> secondaryParentAssocPair : nodeToDelete.secondaryParentAssocs) {
            if (!childAssocIds.add(secondaryParentAssocPair.getFirst())) {
                continue; // Already fired
            }
            nodeDAO.deleteChildAssoc(secondaryParentAssocPair.getFirst());
            invokeOnDeleteChildAssociation(secondaryParentAssocPair.getSecond());
            nodeIndexer.indexDeleteChildAssociation(secondaryParentAssocPair.getSecond());
        }
        QName childNodeTypeQName = nodeDAO.getNodeType(nodeToDelete.id);
        Set<QName> childAspectQnames = nodeDAO.getNodeAspects(nodeToDelete.id);
        // Delete the node
        nodeDAO.deleteChildAssoc(nodeToDelete.primaryParentAssocPair.getFirst());
        nodeDAO.deleteNode(nodeToDelete.id);
        invokeOnDeleteNode(nodeToDelete.primaryParentAssocPair.getSecond(), childNodeTypeQName,
                childAspectQnames, archive);
        nodeIndexer.indexDeleteNode(nodeToDelete.primaryParentAssocPair.getSecond());
    }

    // Clear out the list of nodes pending delete
    nodesPendingDeleteTxn = TransactionalResourceHelper.getSet(KEY_PENDING_DELETE_NODES);
    nodesPendingDeleteTxn.removeAll(nodesPendingDelete);
}
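
Between the pre-delete and post-delete passes the method reuses its two dedup sets, childAssocIds and peerAssocIds, by calling clear() so each association fires exactly one event per pass. A hypothetical sketch of that guard-set pattern (ids and event names are illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DedupPassSketch {
    public static void main(String[] args) {
        List<Long> assocIds = Arrays.asList(1L, 2L, 1L, 3L);
        Set<Long> fired = new HashSet<Long>();

        // First pass: fire each id once; add() returns false for duplicates
        for (Long id : assocIds) {
            if (fired.add(id)) {
                System.out.println("before-delete " + id);
            }
        }

        // Reset the guard set so the second pass can fire each id once again
        fired.clear();
        for (Long id : assocIds) {
            if (fired.add(id)) {
                System.out.println("on-delete " + id);
            }
        }
    }
}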

From source file:com.android.messaging.datamodel.action.SyncCursorPair.java

long scan(final int maxMessagesToScan, final int maxMessagesToUpdate, final ArrayList<SmsMessage> smsToAdd,
        final LongSparseArray<MmsMessage> mmsToAdd, final ArrayList<LocalDatabaseMessage> messagesToDelete,
        final SyncManager.ThreadInfoCache threadInfoCache) {
    // Set of local messages matched with the timestamp of a remote message
    final Set<DatabaseMessage> matchedLocalMessages = Sets.newHashSet();
    // Set of remote messages matched with the timestamp of a local message
    final Set<DatabaseMessage> matchedRemoteMessages = Sets.newHashSet();
    long lastTimestampMillis = SYNC_STARTING;
    // Number of messages scanned local and remote
    int localCount = 0;
    int remoteCount = 0;
    // Seed the initial values of remote and local messages for comparison
    DatabaseMessage remoteMessage = mRemoteCursorsIterator.next();
    DatabaseMessage localMessage = mLocalCursorIterator.next();
    // Iterate through messages on both sides in reverse time order
    // Import messages in remote not in local, delete messages in local not in remote
    while (localCount + remoteCount < maxMessagesToScan
            && smsToAdd.size() + mmsToAdd.size() + messagesToDelete.size() < maxMessagesToUpdate) {
        if (remoteMessage == null && localMessage == null) {
            // No more messages on either side - scan complete
            lastTimestampMillis = SYNC_COMPLETE;
            break;
        } else if ((remoteMessage == null && localMessage != null)
                || (localMessage != null && remoteMessage != null
                        && localMessage.getTimestampInMillis() > remoteMessage.getTimestampInMillis())) {
            // Found a local message that is not in remote db
            // Delete the local message
            messagesToDelete.add((LocalDatabaseMessage) localMessage);
            lastTimestampMillis = Math.min(lastTimestampMillis, localMessage.getTimestampInMillis());
            // Advance to next local message
            localMessage = mLocalCursorIterator.next();
            localCount += 1;
        } else if ((localMessage == null && remoteMessage != null)
                || (localMessage != null && remoteMessage != null
                        && localMessage.getTimestampInMillis() < remoteMessage.getTimestampInMillis())) {
            // Found a remote message that is not in local db
            // Add the remote message
            saveMessageToAdd(smsToAdd, mmsToAdd, remoteMessage, threadInfoCache);
            lastTimestampMillis = Math.min(lastTimestampMillis, remoteMessage.getTimestampInMillis());
            // Advance to next remote message
            remoteMessage = mRemoteCursorsIterator.next();
            remoteCount += 1;
        } else {
            // Found remote and local messages at the same timestamp
            final long matchedTimestamp = localMessage.getTimestampInMillis();
            lastTimestampMillis = Math.min(lastTimestampMillis, matchedTimestamp);
            // Get the next local and remote messages
            final DatabaseMessage remoteMessagePeek = mRemoteCursorsIterator.next();
            final DatabaseMessage localMessagePeek = mLocalCursorIterator.next();
            // Check whether only one message on each side matches the current
            // timestamp by peeking at the next message on both sides: each is
            // either null (no more messages) or carries a different timestamp.
            // We optimize for this case, since it is the most common one when
            // the majority of messages are in sync (they pair up one-to-one at
            // each timestamp), by not allocating the data structures required
            // to compare a set of messages from both sides.
            if ((remoteMessagePeek == null || remoteMessagePeek.getTimestampInMillis() != matchedTimestamp)
                    && (localMessagePeek == null
                            || localMessagePeek.getTimestampInMillis() != matchedTimestamp)) {
                // Optimize the common case where only one message on each side
                // that matches the same timestamp
                if (!remoteMessage.equals(localMessage)) {
                    // local != remote
                    // Delete local message
                    messagesToDelete.add((LocalDatabaseMessage) localMessage);
                    // Add remote message
                    saveMessageToAdd(smsToAdd, mmsToAdd, remoteMessage, threadInfoCache);
                }
                // Get next local and remote messages
                localMessage = localMessagePeek;
                remoteMessage = remoteMessagePeek;
                localCount += 1;
                remoteCount += 1;
            } else {
                // Rare case in which multiple messages are in the same timestamp
                // on either or both sides
                // Gather all the matched remote messages
                matchedRemoteMessages.clear();
                matchedRemoteMessages.add(remoteMessage);
                remoteCount += 1;
                remoteMessage = remoteMessagePeek;
                while (remoteMessage != null && remoteMessage.getTimestampInMillis() == matchedTimestamp) {
                    Assert.isTrue(!matchedRemoteMessages.contains(remoteMessage));
                    matchedRemoteMessages.add(remoteMessage);
                    remoteCount += 1;
                    remoteMessage = mRemoteCursorsIterator.next();
                }
                // Gather all the matched local messages
                matchedLocalMessages.clear();
                matchedLocalMessages.add(localMessage);
                localCount += 1;
                localMessage = localMessagePeek;
                while (localMessage != null && localMessage.getTimestampInMillis() == matchedTimestamp) {
                    if (matchedLocalMessages.contains(localMessage)) {
                        // A duplicate message in the local database is deleted
                        messagesToDelete.add((LocalDatabaseMessage) localMessage);
                    } else {
                        matchedLocalMessages.add(localMessage);
                    }
                    localCount += 1;
                    localMessage = mLocalCursorIterator.next();
                }
                // Delete messages local only
                for (final DatabaseMessage msg : Sets.difference(matchedLocalMessages, matchedRemoteMessages)) {
                    messagesToDelete.add((LocalDatabaseMessage) msg);
                }
                // Add messages remote only
                for (final DatabaseMessage msg : Sets.difference(matchedRemoteMessages, matchedLocalMessages)) {
                    saveMessageToAdd(smsToAdd, mmsToAdd, msg, threadInfoCache);
                }
            }
        }
    }
    return lastTimestampMillis;
}
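
In the rare same-timestamp branch, the two scratch sets matchedLocalMessages and matchedRemoteMessages are emptied with clear() at the start of each match group, avoiding a fresh allocation per group. A minimal sketch of a reusable scratch set (the data is made up):

import java.util.HashSet;
import java.util.Set;

public class ScratchSetSketch {
    public static void main(String[] args) {
        Set<String> matched = new HashSet<String>();
        String[][] groups = { { "m1", "m2", "m2" }, { "m2", "m3" } };

        for (String[] group : groups) {
            matched.clear(); // empty the scratch set instead of allocating a new one
            for (String msg : group) {
                if (!matched.add(msg)) {
                    System.out.println("duplicate within group: " + msg);
                }
            }
            System.out.println("group matched: " + matched);
        }
    }
}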

From source file:org.broadinstitute.sting.utils.variantcontext.VariantContextUtils.java

/**
 * Merges VariantContexts into a single hybrid.  Takes genotypes for common samples in priority order, if provided.
 * If uniqifySamples is true, the priority order is ignored and names are created by concatenating the VC name with
 * the sample name
 *
 * @param genomeLocParser           loc parser
 * @param unsortedVCs               collection of unsorted VCs
 * @param priorityListOfVCs         priority list detailing the order in which we should grab the VCs
 * @param filteredRecordMergeType   merge type for filtered records
 * @param genotypeMergeOptions      merge option for genotypes
 * @param annotateOrigin            should we annotate the set it came from?
 * @param printMessages             should we print messages?
 * @param inputRefBase              the ref base
 * @param setKey                    the key name of the set
 * @param filteredAreUncalled       are filtered records uncalled?
 * @param mergeInfoWithMaxAC        should we merge in info from the VC with maximum allele count?
 * @return new VariantContext
 */
public static VariantContext simpleMerge(GenomeLocParser genomeLocParser,
        Collection<VariantContext> unsortedVCs, List<String> priorityListOfVCs,
        FilteredRecordMergeType filteredRecordMergeType, GenotypeMergeType genotypeMergeOptions,
        boolean annotateOrigin, boolean printMessages, byte inputRefBase, String setKey,
        boolean filteredAreUncalled, boolean mergeInfoWithMaxAC) {
    if (unsortedVCs == null || unsortedVCs.size() == 0)
        return null;

    if (annotateOrigin && priorityListOfVCs == null)
        throw new IllegalArgumentException(
                "Cannot merge calls and annotate their origins without a complete priority list of VariantContexts");

    if (genotypeMergeOptions == GenotypeMergeType.REQUIRE_UNIQUE)
        verifyUniqueSampleNames(unsortedVCs);

    List<VariantContext> prepaddedVCs = sortVariantContextsByPriority(unsortedVCs, priorityListOfVCs,
            genotypeMergeOptions);
    // Make sure all variant contexts are padded with the reference base in case of indels, if necessary
    List<VariantContext> VCs = new ArrayList<VariantContext>();

    for (VariantContext vc : prepaddedVCs) {
        // also a reasonable place to remove filtered calls, if needed
        if (!filteredAreUncalled || vc.isNotFiltered())
            VCs.add(VariantContext.createVariantContextWithPaddedAlleles(vc, inputRefBase, false));
    }
    if (VCs.size() == 0) // everything is filtered out and we're filteredAreUncalled
        return null;

    // establish the baseline info from the first VC
    VariantContext first = VCs.get(0);
    String name = first.getSource();
    GenomeLoc loc = getLocation(genomeLocParser, first);

    Set<Allele> alleles = new TreeSet<Allele>();
    Map<String, Genotype> genotypes = new TreeMap<String, Genotype>();
    double negLog10PError = -1;
    Set<String> filters = new TreeSet<String>();
    Map<String, Object> attributes = new TreeMap<String, Object>();
    Set<String> inconsistentAttributes = new HashSet<String>();
    String rsID = null;
    int depth = 0;
    int maxAC = -1;
    Map<String, Object> attributesWithMaxAC = new TreeMap<String, Object>();
    VariantContext vcWithMaxAC = null;

    // counting the number of filtered and variant VCs
    int nFiltered = 0, nVariant = 0;

    Allele refAllele = determineReferenceAllele(VCs);
    boolean remapped = false;

    // cycle through and add info from the other VCs, making sure the loc/reference matches

    for (VariantContext vc : VCs) {
        if (loc.getStart() != vc.getStart()) // || !first.getReference().equals(vc.getReference()) )
            throw new ReviewedStingException(
                    "BUG: attempting to merge VariantContexts with different start sites: first="
                            + first.toString() + " second=" + vc.toString());

        if (getLocation(genomeLocParser, vc).size() > loc.size())
            loc = getLocation(genomeLocParser, vc); // get the longest location

        nFiltered += vc.isFiltered() ? 1 : 0;
        nVariant += vc.isVariant() ? 1 : 0;

        AlleleMapper alleleMapping = resolveIncompatibleAlleles(refAllele, vc, alleles);
        remapped = remapped || alleleMapping.needsRemapping();

        alleles.addAll(alleleMapping.values());

        mergeGenotypes(genotypes, vc, alleleMapping, genotypeMergeOptions == GenotypeMergeType.UNIQUIFY);

        negLog10PError = Math.max(negLog10PError, vc.isVariant() ? vc.getNegLog10PError() : -1);

        filters.addAll(vc.getFilters());

        //
        // add attributes
        //
        // special case DP (add it up) and ID (just preserve it)
        //
        if (vc.hasAttribute(VCFConstants.DEPTH_KEY))
            depth += Integer.valueOf(vc.getAttributeAsString(VCFConstants.DEPTH_KEY));
        if (rsID == null && vc.hasID())
            rsID = vc.getID();
        if (mergeInfoWithMaxAC && vc.hasAttribute(VCFConstants.ALLELE_COUNT_KEY)) {
            String rawAlleleCounts = vc.getAttributeAsString(VCFConstants.ALLELE_COUNT_KEY);
            // let's see if the string contains a ',' separator
            if (rawAlleleCounts.contains(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR)) {
                List<String> alleleCountArray = Arrays
                        .asList(rawAlleleCounts.substring(1, rawAlleleCounts.length() - 1)
                                .split(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR));
                for (String alleleCount : alleleCountArray) {
                    final int ac = Integer.valueOf(alleleCount.trim());
                    if (ac > maxAC) {
                        maxAC = ac;
                        vcWithMaxAC = vc;
                    }
                }
            } else {
                final int ac = Integer.valueOf(rawAlleleCounts);
                if (ac > maxAC) {
                    maxAC = ac;
                    vcWithMaxAC = vc;
                }
            }
        }

        for (Map.Entry<String, Object> p : vc.getAttributes().entrySet()) {
            String key = p.getKey();
            // if we don't like the key already, don't go anywhere
            if (!inconsistentAttributes.contains(key)) {
                boolean alreadyFound = attributes.containsKey(key);
                Object boundValue = attributes.get(key);
                boolean boundIsMissingValue = alreadyFound && boundValue.equals(VCFConstants.MISSING_VALUE_v4);

                if (alreadyFound && !boundValue.equals(p.getValue()) && !boundIsMissingValue) {
                    // we found the value but we're inconsistent, put it in the exclude list
                    //System.out.printf("Inconsistent INFO values: %s => %s and %s%n", key, boundValue, p.getValue());
                    inconsistentAttributes.add(key);
                    attributes.remove(key);
                } else if (!alreadyFound || boundIsMissingValue) { // no value
                    //if ( vc != first ) System.out.printf("Adding key %s => %s%n", p.getKey(), p.getValue());
                    attributes.put(key, p.getValue());
                }
            }
        }
    }

    // take the VC with the maxAC and pull the attributes into a modifiable map
    if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
        attributesWithMaxAC.putAll(vcWithMaxAC.getAttributes());
    }

    // if at least one record was unfiltered and we want a union, clear all of the filters
    if (filteredRecordMergeType == FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED && nFiltered != VCs.size())
        filters.clear();

    // we care about where the call came from
    if (annotateOrigin) {
        String setValue;
        if (nFiltered == 0 && nVariant == priorityListOfVCs.size()) // everything was unfiltered and variant
            setValue = "Intersection";
        else if (nFiltered == VCs.size()) // everything was filtered out
            setValue = "FilteredInAll";
        else if (nVariant == 0) // everyone was reference
            setValue = "ReferenceInAll";
        else { // we are filtered in some subset
            List<String> s = new ArrayList<String>();
            for (VariantContext vc : VCs)
                if (vc.isVariant())
                    s.add(vc.isFiltered() ? "filterIn" + vc.getSource() : vc.getSource());
            setValue = Utils.join("-", s);
        }

        if (setKey != null) {
            attributes.put(setKey, setValue);
            if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
                attributesWithMaxAC.put(setKey, vcWithMaxAC.getSource());
            }
        }
    }

    if (depth > 0)
        attributes.put(VCFConstants.DEPTH_KEY, String.valueOf(depth));
    if (rsID != null)
        attributes.put(VariantContext.ID_KEY, rsID);

    VariantContext merged = new VariantContext(name, loc.getContig(), loc.getStart(), loc.getStop(), alleles,
            genotypes, negLog10PError, filters, (mergeInfoWithMaxAC ? attributesWithMaxAC : attributes));
    // Trim the padded bases of all alleles if necessary
    merged = AbstractVCFCodec.createVariantContextWithTrimmedAlleles(merged);

    if (printMessages && remapped)
        System.out.printf("Remapped => %s%n", merged);
    return merged;
}
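
One detail worth noting: when the merge type is KEEP_IF_ANY_UNFILTERED and at least one record passed, the accumulated filter set is discarded wholesale with filters.clear(). A minimal sketch of that union rule (the field values are invented):

import java.util.Set;
import java.util.TreeSet;

public class UnionFiltersSketch {
    public static void main(String[] args) {
        Set<String> filters = new TreeSet<String>();
        filters.add("LowQual"); // collected from one of the merged records

        boolean anyRecordUnfiltered = true;
        if (anyRecordUnfiltered) {
            filters.clear(); // union semantics: one passing record clears all filters
        }
        System.out.println("filters: " + filters); // prints filters: []
    }
}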

From source file:edu.brown.hstore.BatchPlanner.java

/**
 * @param txn_id
 * @param client_handle
 * @param base_partition
 * @param predict_partitions
 * @param touched_partitions
 * @param batchArgs
 * @return
 */
public BatchPlan plan(Long txn_id, long client_handle, Integer base_partition,
        Collection<Integer> predict_partitions, boolean predict_singlepartitioned,
        Histogram<Integer> touched_partitions, ParameterSet[] batchArgs) {
    if (this.enable_profiling)
        time_plan.start();
    if (d)
        LOG.debug(String.format("Constructing a new %s BatchPlan for %s txn #%d", this.catalog_proc.getName(),
                (predict_singlepartitioned ? "single-partition" : "distributed"), txn_id));

    boolean cache_isSinglePartition[] = null;

    // OPTIMIZATION: Check whether we can use a cached single-partition BatchPlan
    if (this.force_singlePartition || this.enable_caching) {
        boolean is_allSinglePartition = true;
        cache_isSinglePartition = new boolean[this.batchSize];

        // OPTIMIZATION: Skip all of this if we know that we're always
        //               supposed to be single-partitioned
        if (this.force_singlePartition == false) {
            for (int stmt_index = 0; stmt_index < this.batchSize; stmt_index++) {
                if (cache_fastLookups[stmt_index] == null) {
                    if (d)
                        LOG.debug(String.format(
                                "[#%d-%02d] No fast look-ups for %s. Cache is marked as not single-partitioned",
                                txn_id, stmt_index, this.catalog_stmts[stmt_index].fullName()));
                    cache_isSinglePartition[stmt_index] = false;
                } else {
                    if (d)
                        LOG.debug(String.format("[#%d-%02d] Using fast-lookup caching for %s: %s", txn_id,
                                stmt_index, this.catalog_stmts[stmt_index].fullName(),
                                Arrays.toString(cache_fastLookups[stmt_index])));
                    Object params[] = batchArgs[stmt_index].toArray();
                    cache_isSinglePartition[stmt_index] = true;
                    for (int idx : cache_fastLookups[stmt_index]) {
                        if (hasher.hash(params[idx]) != base_partition.intValue()) {
                            cache_isSinglePartition[stmt_index] = false;
                            break;
                        }
                    } // FOR
                }
                if (d)
                    LOG.debug(String.format("[#%d-%02d] cache_isSinglePartition[%s] = %s", txn_id, stmt_index,
                            this.catalog_stmts[stmt_index].fullName(), cache_isSinglePartition[stmt_index]));
                is_allSinglePartition = is_allSinglePartition && cache_isSinglePartition[stmt_index];
            } // FOR (Statement)
        }
        if (t)
            LOG.trace(String.format("[#%d] is_allSinglePartition=%s", txn_id, is_allSinglePartition));

        // If all of the Statements are single-partition, then we can use
        // the cached BatchPlan if we already have one.
        // This saves a lot of trouble
        if (is_allSinglePartition && cache_singlePartitionPlans[base_partition.intValue()] != null) {
            if (d)
                LOG.debug(String.format("[#%d] Using cached BatchPlan at partition #%02d: %s", txn_id,
                        base_partition, Arrays.toString(this.catalog_stmts)));
            if (this.enable_profiling)
                time_plan.stop();
            return (cache_singlePartitionPlans[base_partition.intValue()]);
        }
    }

    // Otherwise we have to construct a new BatchPlan
    plan.init(client_handle, base_partition);

    // ----------------------
    // DEBUG DUMP
    // ----------------------
    if (t) {
        Map<String, Object> m = new ListOrderedMap<String, Object>();
        m.put("Batch Size", this.batchSize);
        for (int i = 0; i < this.batchSize; i++) {
            m.put(String.format("[%02d] %s", i, this.catalog_stmts[i].getName()),
                    Arrays.toString(batchArgs[i].toArray()));
        }
        LOG.trace("\n" + StringUtil.formatMapsBoxed(m));
    }

    // Only maintain the histogram of what partitions were touched if we
    // know that we're going to throw a MispredictionException
    Histogram<Integer> mispredict_h = null;
    boolean mispredict = false;

    for (int stmt_index = 0; stmt_index < this.batchSize; stmt_index++) {
        final Statement catalog_stmt = this.catalog_stmts[stmt_index];
        assert (catalog_stmt != null) : "The Statement at index " + stmt_index + " is null for "
                + this.catalog_proc;
        final Object params[] = batchArgs[stmt_index].toArray();
        if (t)
            LOG.trace(String.format("[#%d-%02d] Calculating touched partitions plans for %s", txn_id,
                    stmt_index, catalog_stmt.fullName()));

        Map<PlanFragment, Set<Integer>> frag_partitions = plan.frag_partitions[stmt_index];
        Set<Integer> stmt_all_partitions = plan.stmt_partitions[stmt_index];

        boolean has_singlepartition_plan = catalog_stmt.getHas_singlesited();
        boolean is_replicated_only = this.stmt_is_replicatedonly[stmt_index];
        boolean is_read_only = this.stmt_is_readonly[stmt_index];
        // boolean stmt_localFragsAreNonTransactional =
        // plan.localFragsAreNonTransactional;
        boolean is_singlepartition = has_singlepartition_plan;
        boolean is_local = true;
        CatalogMap<PlanFragment> fragments = null;

        // AbstractPlanNode node =
        // PlanNodeUtil.getRootPlanNodeForStatement(catalog_stmt, false);
        // LOG.info(PlanNodeUtil.debug(node));

        // OPTIMIZATION: Fast partition look-up caching
        // OPTIMIZATION: Read-only queries on replicated tables always just
        //               go to the local partition
        // OPTIMIZATION: If we're force to be single-partitioned, pretend
        //               that the table is replicated
        if (cache_isSinglePartition[stmt_index] || (is_replicated_only && is_read_only)
                || this.force_singlePartition) {
            if (t) {
                if (cache_isSinglePartition[stmt_index]) {
                    LOG.trace(String.format("[#%d-%02d] Using fast-lookup for %s. Skipping PartitionEstimator",
                            txn_id, stmt_index, catalog_stmt.fullName()));
                } else {
                    LOG.trace(String.format(
                            "[#%d-%02d] %s is read-only and replicate-only. Skipping PartitionEstimator",
                            txn_id, stmt_index, catalog_stmt.fullName()));
                }
            }
            assert (has_singlepartition_plan);

            if (this.cache_singlePartitionFragmentPartitions == null) {
                this.cache_singlePartitionFragmentPartitions = CACHED_FRAGMENT_PARTITION_MAPS[base_partition
                        .intValue()];
            }
            Map<PlanFragment, Set<Integer>> cached_frag_partitions = this.cache_singlePartitionFragmentPartitions
                    .get(catalog_stmt);
            if (cached_frag_partitions == null) {
                cached_frag_partitions = new HashMap<PlanFragment, Set<Integer>>();
                Set<Integer> p = CACHED_SINGLE_PARTITION_SETS[base_partition.intValue()];
                for (PlanFragment catalog_frag : catalog_stmt.getFragments().values()) {
                    cached_frag_partitions.put(catalog_frag, p);
                } // FOR
                this.cache_singlePartitionFragmentPartitions.put(catalog_stmt, cached_frag_partitions);
            }
            if (plan.stmt_partitions_swap[stmt_index] == null) {
                plan.stmt_partitions_swap[stmt_index] = plan.stmt_partitions[stmt_index];
                plan.frag_partitions_swap[stmt_index] = plan.frag_partitions[stmt_index];
            }
            stmt_all_partitions = plan.stmt_partitions[stmt_index] = CACHED_SINGLE_PARTITION_SETS[base_partition
                    .intValue()];
            frag_partitions = plan.frag_partitions[stmt_index] = cached_frag_partitions;
        }

        // Otherwise figure out whether the query can execute as
        // single-partitioned or not
        else {
            if (t)
                LOG.trace(String.format(
                        "[#%d-%02d] Computing touched partitions %s in txn #%d with the PartitionEstimator",
                        txn_id, stmt_index, catalog_stmt.fullName(), txn_id));

            if (plan.stmt_partitions_swap[stmt_index] != null) {
                stmt_all_partitions = plan.stmt_partitions[stmt_index] = plan.stmt_partitions_swap[stmt_index];
                plan.stmt_partitions_swap[stmt_index] = null;
                stmt_all_partitions.clear();

                frag_partitions = plan.frag_partitions[stmt_index] = plan.frag_partitions_swap[stmt_index];
                plan.frag_partitions_swap[stmt_index] = null;
            }

            try {
                // OPTIMIZATION: If we were told that the transaction is supposed to be 
                // single-partitioned, then we will throw the single-partitioned PlanFragments 
                // at the PartitionEstimator to get back what partitions each PlanFragment 
                // will need to go to. If we get multiple partitions, then we know that we 
                // mispredicted and we should throw a MispredictionException.
                // If we originally didn't predict that it was single-partitioned, then we 
                // actually still need to check whether the query should be single-partitioned or not.
                // This is because a query may want to execute on just one partition
                // (note that it could be the local partition or a remote partition).
                // We'll assume that it's single-partition <<--- Can we cache that??
                while (true) {
                    if (is_singlepartition == false)
                        stmt_all_partitions.clear();
                    fragments = (is_singlepartition ? catalog_stmt.getFragments()
                            : catalog_stmt.getMs_fragments());

                    // PARTITION ESTIMATOR
                    if (this.enable_profiling)
                        ProfileMeasurement.swap(this.time_plan, this.time_partitionEstimator);
                    this.p_estimator.getAllFragmentPartitions(frag_partitions, stmt_all_partitions,
                            fragments.values(), params, base_partition);
                    if (this.enable_profiling)
                        ProfileMeasurement.swap(this.time_partitionEstimator, this.time_plan);

                    int stmt_all_partitions_size = stmt_all_partitions.size();
                    if (is_singlepartition && stmt_all_partitions_size > 1) {
                        // The single-partition plan hit multiple partitions. If the
                        // txn was predicted to be single-partitioned, then we want
                        // to stop right here!!
                        if (predict_singlepartitioned) {
                            if (t)
                                LOG.trace(String.format("Mispredicted txn #%d - Multiple Partitions"));
                            mispredict = true;
                            break;
                        }
                        // Otherwise we can let it wrap back around and
                        // construct the fragment mapping for the
                        // multi-partition PlanFragments
                        is_singlepartition = false;
                        continue;
                    }
                    is_local = (stmt_all_partitions_size == 1 && stmt_all_partitions.contains(base_partition));
                    if (is_local == false && predict_singlepartitioned) {
                        // Again, this is not what was supposed to happen!
                        if (t)
                            LOG.trace(String.format("Mispredicted txn #%d - Remote Partitions %s", txn_id,
                                    stmt_all_partitions));
                        mispredict = true;
                        break;
                    } else if (predict_partitions.containsAll(stmt_all_partitions) == false) {
                        // Again, this is not what was supposed to happen!
                        if (t)
                            LOG.trace(String.format("Mispredicted txn #%d - Unallocated Partitions %s / %s",
                                    txn_id, stmt_all_partitions, predict_partitions));
                        mispredict = true;
                        break;
                    }
                    // Score! We have a plan that works!
                    break;
                } // WHILE
            } catch (Exception ex) {
                // Bad Mojo!
                String msg = "";
                for (int i = 0; i < this.batchSize; i++) {
                    msg += String.format("[#%d-%02d] %s %s\n%5s\n", txn_id, i, catalog_stmt.fullName(),
                            catalog_stmt.getSqltext(), Arrays.toString(batchArgs[i].toArray()));
                } // FOR
                LOG.fatal("\n" + msg);
                throw new RuntimeException("Unexpected error when planning " + catalog_stmt.fullName(), ex);
            }
        }
        if (d)
            LOG.debug(String.format("[#%d-%02d] is_singlepartition=%s, partitions=%s", txn_id, stmt_index,
                    is_singlepartition, stmt_all_partitions));

        // Get a sorted list of the PlanFragments that we need to execute
        // for this query
        if (is_singlepartition) {
            if (this.sorted_singlep_fragments[stmt_index] == null) {
                this.sorted_singlep_fragments[stmt_index] = PlanNodeUtil.getSortedPlanFragments(catalog_stmt,
                        true);
            }
            plan.frag_list[stmt_index] = this.sorted_singlep_fragments[stmt_index];

            // Only mark that we touched these partitions if the Statement
            // is not on a replicated table
            if (is_replicated_only == false) {
                touched_partitions.putAll(stmt_all_partitions);
            }

        } else {
            if (this.sorted_multip_fragments[stmt_index] == null) {
                this.sorted_multip_fragments[stmt_index] = PlanNodeUtil.getSortedPlanFragments(catalog_stmt,
                        false);
            }
            plan.frag_list[stmt_index] = this.sorted_multip_fragments[stmt_index];

            // Always mark that we are touching these partitions
            touched_partitions.putAll(stmt_all_partitions);
        }

        plan.readonly = plan.readonly && catalog_stmt.getReadonly();
        // plan.localFragsAreNonTransactional =
        // plan.localFragsAreNonTransactional ||
        // stmt_localFragsAreNonTransactional;
        plan.all_singlepartitioned = plan.all_singlepartitioned && is_singlepartition;
        plan.all_local = plan.all_local && is_local;

        // Keep track of whether the current query in the batch was
        // single-partitioned or not
        plan.singlepartition_bitmap[stmt_index] = is_singlepartition;

        // Misprediction!!
        if (mispredict) {
            // If this is the first Statement in the batch that hits the mispredict, 
            // then we need to create the histogram and populate it with the 
            // partitions from the previous queries
            int start_idx = stmt_index;
            if (mispredict_h == null) {
                mispredict_h = new Histogram<Integer>();
                start_idx = 0;
            }
            for (int i = start_idx; i <= stmt_index; i++) {
                if (d)
                    LOG.debug(String.format(
                            "Pending mispredict for txn #%d. Checking whether to add partitions for batch statement %02d",
                            txn_id, i));

                // Make sure that we don't count the local partition if it
                // was reading a replicated table.
                if (this.stmt_is_replicatedonly[i] == false
                        || (this.stmt_is_replicatedonly[i] && this.stmt_is_readonly[i] == false)) {
                    if (t)
                        LOG.trace(String.format(
                                "%s touches non-replicated table. Including %d partitions in mispredict histogram for txn #%d",
                                this.catalog_stmts[i].fullName(), plan.stmt_partitions[i].size(), txn_id));
                    mispredict_h.putAll(plan.stmt_partitions[i]);
                }
            } // FOR
            continue;
        }

        // ----------------------
        // DEBUG DUMP
        // ----------------------
        if (d) {
            Map<?, ?> maps[] = new Map[fragments.size() + 1];
            int ii = 0;
            for (PlanFragment catalog_frag : fragments) {
                Map<String, Object> m = new ListOrderedMap<String, Object>();
                Set<Integer> p = plan.frag_partitions[stmt_index].get(catalog_frag);
                boolean frag_local = (p.size() == 1 && p.contains(base_partition));
                m.put(String.format("[%02d] Fragment", ii), catalog_frag.fullName());
                m.put(String.format("     Partitions"), p);
                m.put(String.format("     IsLocal"), frag_local);
                ii++;
                maps[ii] = m;
            } // FOR

            Map<String, Object> header = new ListOrderedMap<String, Object>();
            header.put("Batch Statement#", String.format("%02d / %02d", stmt_index, this.batchSize));
            header.put("Catalog Statement", catalog_stmt.fullName());
            header.put("Statement SQL", catalog_stmt.getSqltext());
            header.put("All Partitions", plan.stmt_partitions[stmt_index]);
            header.put("Local Partition", base_partition);
            header.put("IsSingledSited", is_singlepartition);
            header.put("IsStmtLocal", is_local);
            header.put("IsReplicatedOnly", is_replicated_only);
            header.put("IsBatchLocal", plan.all_local);
            header.put("Fragments", fragments.size());
            maps[0] = header;

            LOG.debug("\n" + StringUtil.formatMapsBoxed(maps));
        }
    } // FOR (Statement)

    // Check whether we have an existing graph exists for this batch
    // configuration
    // This is the only place where we need to synchronize
    int bitmap_hash = Arrays.hashCode(plan.singlepartition_bitmap);
    PlanGraph graph = this.plan_graphs.get(bitmap_hash);
    if (graph == null) { // assume fast case
        graph = this.buildPlanGraph(plan);
        this.plan_graphs.put(bitmap_hash, graph);
    }
    plan.graph = graph;
    plan.rounds_length = graph.num_rounds;

    if (this.enable_profiling)
        time_plan.stop();

    // Create the MispredictException if any Statement in the loop above hit
    // it. We don't want to throw it because whoever called us may want to look
    // at the plan first
    if (mispredict_h != null) {
        plan.mispredict = new MispredictionException(txn_id, mispredict_h);
    }
    // If this a single-partition plan and we have caching enabled, we'll
    // add this to our cached listing. We'll mark it as cached so that it is never
    // returned back to the BatchPlan object pool
    else if (this.enable_caching && cache_singlePartitionPlans[base_partition.intValue()] == null
            && plan.isSingledPartitionedAndLocal()) {
        cache_singlePartitionPlans[base_partition.intValue()] = plan;
        plan.cached = true;
        plan = new BatchPlan(this.maxRoundSize);
        return cache_singlePartitionPlans[base_partition.intValue()];
    }

    if (d)
        LOG.debug("Created BatchPlan:\n" + plan.toString());
    return (plan);
}
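
Throughout the planner, partition sets such as stmt_all_partitions are recycled rather than reallocated: each retry or reused slot starts by calling clear() on the existing set before the PartitionEstimator repopulates it. A hypothetical sketch of that recycle-and-refill pattern (the partition numbers are illustrative):

import java.util.HashSet;
import java.util.Set;

public class RecycledSetSketch {
    public static void main(String[] args) {
        Set<Integer> partitions = new HashSet<Integer>();

        for (int stmt = 0; stmt < 3; stmt++) {
            partitions.clear(); // recycle the set instead of allocating per statement
            partitions.add(stmt % 2); // stand-in for the estimator's output
            System.out.println("stmt " + stmt + " touches " + partitions);
        }
    }
}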