Example usage for org.hibernate Query list

List of usage examples for org.hibernate Query list

Introduction

This page shows example usages of the org.hibernate Query.list() method.

Prototype

List<R> list();

Document

Return the query results as a List.
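
Before the full examples below, here is a minimal sketch of the typical call pattern, assuming the legacy org.hibernate.Query API used in those examples (where list() returns a raw List); the Person entity, its fields, and the session handling are illustrative assumptions, not taken from the source files below.

import java.util.List;

import org.hibernate.Query;
import org.hibernate.Session;
import org.hibernate.SessionFactory;

public class QueryListSketch {

    /**
     * Runs an HQL query and returns all matching rows via Query.list().
     * "Person" is a hypothetical mapped entity used only for illustration.
     */
    @SuppressWarnings("unchecked")
    public static List<String> findPersonNamesByCity(SessionFactory sessionFactory, String city) {
        Session session = sessionFactory.openSession();
        try {
            Query query = session.createQuery("select p.name from Person p where p.city = :city");
            query.setParameter("city", city);
            // list() executes the query and returns every result row as a List
            return query.list();
        } finally {
            session.close();
        }
    }
}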

Usage

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

private List<Long> getSubjectIdsForBiospecimenIds(List<Long> biospecimenIdsToInclude) {
    if (biospecimenIdsToInclude == null || biospecimenIdsToInclude.isEmpty()) {
        return new ArrayList<Long>();
    }
    String queryString = "select bio.linkSubjectStudy.id from Biospecimen bio "
            + " where bio.id in (:biospecimenIdsToInclude) ";
    Query query = getSession().createQuery(queryString);
    query.setParameterList("biospecimenIdsToInclude", biospecimenIdsToInclude);
    return query.list();
}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

private List<Long> getSubjectIdsForPhenoDataIds(List<Long> phenoDataIdsToInclude) {
    if (phenoDataIdsToInclude == null || phenoDataIdsToInclude.isEmpty()) {
        return new ArrayList<Long>();
    }
    String queryString = "select pheno.phenoDataSetCollection.linkSubjectStudy.id from PhenoDataSetData pheno "
            + " where pheno.id in (:phenoDataIdsToInclude) ";
    Query query = getSession().createQuery(queryString);
    query.setParameterList("phenoDataIdsToInclude", phenoDataIdsToInclude);
    return query.list();
}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

private List<Long> getBiospecimenIdForSubjectIds(List<Long> subjectIds) {
    if (subjectIds == null || subjectIds.isEmpty()) {
        return new ArrayList<Long>();
    }
    String queryString = "select bio.id from Biospecimen bio "
            + " where bio.linkSubjectStudy.id in (:subjectIds) ";
    Query query = getSession().createQuery(queryString);
    query.setParameterList("subjectIds", subjectIds);
    return query.list();
}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

private List<Long> getSubjectIdsForBioCollectionIds(List<Long> bioCollectionIdsToInclude) {
    if (bioCollectionIdsToInclude == null || bioCollectionIdsToInclude.isEmpty()) {
        return new ArrayList<Long>();
    }
    String queryString = "select bio.linkSubjectStudy.id from BioCollection bio "
            + " where bio.id in (:bioCollectionIdsToInclude) ";
    Query query = getSession().createQuery(queryString);
    query.setParameterList("bioCollectionIdsToInclude", bioCollectionIdsToInclude);
    return query.list();
}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

private List<Long> getBioCollectionIdForSubjectIds(List<Long> subjectIds) {
    if (subjectIds == null || subjectIds.isEmpty()) {
        return new ArrayList<Long>();
    }
    String queryString = "select bc.id from BioCollection bc "
            + " where bc.linkSubjectStudy.id in (:subjectIds) ";
    Query query = getSession().createQuery(queryString);
    query.setParameterList("subjectIds", subjectIds);
    return query.list();
}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

/**
 * This will get all the pheno data for the given subjects FOR THIS ONE CustomFieldGroup aka questionnaire (aka data set)
 *
 * @param allTheData
 * @param search
 * @param idsToInclude
 * @return the updated list of uids that are still left after the filtering. 
 */
private List<Long> applyPhenoDataSetFilters(DataExtractionVO allTheData, Search search,
        List<Long> idsToInclude) {

    Set<QueryFilter> filters = search.getQueryFilters();

    Collection<PhenoDataSetGroup> pdsgWithFilters = getPhenoDataSetGroupsForPhenoFilters(search, filters);
    List<Long> phenoCollectionIdsSoFar = new ArrayList<Long>();

    for (PhenoDataSetGroup phenoGroup : pdsgWithFilters) {
        log.info("Pheno group: " + phenoGroup.getName());
        if (idsToInclude != null && !idsToInclude.isEmpty()) {
            String queryToGetPhenoIdsForGivenSearchAndCFGFilters = getQueryForPhenoIdsForSearchAndCFGFilters(
                    search, phenoGroup);

            if (!queryToGetPhenoIdsForGivenSearchAndCFGFilters.isEmpty()) {
                Query query = getSession().createQuery(queryToGetPhenoIdsForGivenSearchAndCFGFilters);
                query.setParameterList("idList", idsToInclude);//TODO ASAP...this should be pheno list and not subjuid list now

                QueryTranslatorFactory translatorFactory = new ASTQueryTranslatorFactory();
                SessionFactoryImplementor factory = (SessionFactoryImplementor) getSession()
                        .getSessionFactory();
                QueryTranslator translator = translatorFactory.createQueryTranslator(query.getQueryString(),
                        query.getQueryString(), Collections.EMPTY_MAP, factory);
                translator.compile(Collections.EMPTY_MAP, false);
                log.info(translator.getSQLString());

                List<Long> phenosForThisCFG = query.list();
                phenoCollectionIdsSoFar.addAll(phenosForThisCFG);
                log.info("rows returned = " + phenoCollectionIdsSoFar.size());
            } else {
                log.info("there were no pheno custom data filters, therefore don't run filter query");
            }
        } else {
            log.info("there are no id's to filter.  therefore won't run filtering query");
        }
    }
    //now that we have all the phenoCollection IDs...get the updated list of subjects
    if (phenoCollectionIdsSoFar.isEmpty()) {
        if (!pdsgWithFilters.isEmpty()) {
            //there were no phenoCollection ids returned because everything was validly filtered out, so clear the list of subject ids
            idsToInclude = new ArrayList<Long>();
        } else {
            //there were no filters, so just leave the list of subjects as it was
        }
    } else {
        idsToInclude = getSubjectIdsForPhenoDataIds(phenoCollectionIdsSoFar);
    }

    //now that we have the pheno collection ids, we just find the data for the selected custom fields

    if (!idsToInclude.isEmpty()) {
        Collection<PhenoDataSetFieldDisplay> customFieldToGet = getSelectedPhenoDataSetFieldDisplaysForSearch(
                search);//getSelectedPhenoCustomFieldDisplaysForSearch(search);
        // We have the list of phenos, and therefore the list of pheno custom data - now bring back all the custom data rows IF they have any data they need 
        if ((!phenoCollectionIdsSoFar.isEmpty()
                || (phenoCollectionIdsSoFar.isEmpty() && pdsgWithFilters.isEmpty()))
                && !customFieldToGet.isEmpty()) {
            String queryString = "select data from PhenoDataSetData data  "
                    + " left join fetch data.phenoDataSetCollection phenoDataSetCollection"
                    + " left join fetch data.phenoDataSetFieldDisplay phenoDataSetFieldDisplay "
                    + " left join fetch phenoDataSetFieldDisplay.phenoDataSetField phenoField "
                    + (((phenoCollectionIdsSoFar.isEmpty() && pdsgWithFilters.isEmpty())
                            ? (" where data.phenoDataSetCollection.linkSubjectStudy.id in (:idsToInclude) ")
                            : (" where data.phenoDataSetCollection.id in (:phenoIdsToInclude)")))
                    + " and data.phenoDataSetFieldDisplay in (:customFieldsList)"
                    + " order by data.phenoDataSetCollection.id";
            Query query2 = getSession().createQuery(queryString);
            if (phenoCollectionIdsSoFar.isEmpty() && pdsgWithFilters.isEmpty()) {
                query2.setParameterList("idsToInclude", idsToInclude);
            } else {
                query2.setParameterList("phenoIdsToInclude", phenoCollectionIdsSoFar);
            }
            query2.setParameterList("customFieldsList", customFieldToGet);

            QueryTranslatorFactory translatorFactory = new ASTQueryTranslatorFactory();
            SessionFactoryImplementor factory = (SessionFactoryImplementor) getSession().getSessionFactory();
            QueryTranslator translator = translatorFactory.createQueryTranslator(query2.getQueryString(),
                    query2.getQueryString(), Collections.EMPTY_MAP, factory);
            translator.compile(Collections.EMPTY_MAP, false);
            log.info(translator.getSQLString());
            List<PhenoDataSetData> phenoData = query2.list();

            HashMap<String, ExtractionVO> hashOfPhenosWithTheirPhenoCustomData = allTheData
                    .getPhenoCustomData();

            ExtractionVO valuesForThisPheno = new ExtractionVO();
            HashMap<String, String> map = null;
            Long previousPhenoId = null;
            //will try to order our results and can therefore just compare to last LSS and either add to or create new Extraction VO
            for (PhenoDataSetData data : phenoData) {

                if (previousPhenoId == null) {
                    map = new HashMap<String, String>();
                    previousPhenoId = data.getPhenoDataSetCollection().getId();
                    valuesForThisPheno.setSubjectUid(
                            data.getPhenoDataSetCollection().getLinkSubjectStudy().getSubjectUID());
                    valuesForThisPheno.setRecordDate(data.getPhenoDataSetCollection().getRecordDate());
                    valuesForThisPheno
                            .setCollectionName(data.getPhenoDataSetCollection().getQuestionnaire().getName());
                } else if (data.getPhenoDataSetCollection().getId().equals(previousPhenoId)) {
                    //then just put the data in
                } else { //if it's a new LSS, finalize the previous map, etc.
                    valuesForThisPheno.setKeyValues(map);
                    hashOfPhenosWithTheirPhenoCustomData.put(("" + previousPhenoId), valuesForThisPheno);
                    previousPhenoId = data.getPhenoDataSetCollection().getId();
                    map = new HashMap<String, String>();//reset
                    valuesForThisPheno = new ExtractionVO();
                    valuesForThisPheno.setSubjectUid(
                            data.getPhenoDataSetCollection().getLinkSubjectStudy().getSubjectUID());
                    valuesForThisPheno.setRecordDate(data.getPhenoDataSetCollection().getRecordDate());
                    valuesForThisPheno
                            .setCollectionName(data.getPhenoDataSetCollection().getQuestionnaire().getName());
                }

                //if any error value, then just use that - though, yet again I really question the acceptance of error data
                if (data.getErrorDataValue() != null && !data.getErrorDataValue().isEmpty()) {
                    map.put(data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getName(),
                            data.getErrorDataValue());
                } else {
                    // Determine field type and assign key value accordingly
                    if (data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getFieldType().getName()
                            .equalsIgnoreCase(Constants.FIELD_TYPE_DATE)) {
                        map.put(data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getName(),
                                data.getDateDataValue().toString());
                    }
                    if (data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getFieldType().getName()
                            .equalsIgnoreCase(Constants.FIELD_TYPE_NUMBER)) {
                        map.put(data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getName(),
                                data.getNumberDataValue().toString());
                    }
                    if (data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getFieldType().getName()
                            .equalsIgnoreCase(Constants.FIELD_TYPE_CHARACTER)) {
                        map.put(data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getName(),
                                data.getTextDataValue());
                    }
                }
            }

            //finalize the last entered key value sets/extraction VOs
            if (map != null && previousPhenoId != null) {
                valuesForThisPheno.setKeyValues(map);
                hashOfPhenosWithTheirPhenoCustomData.put("" + previousPhenoId, valuesForThisPheno);
            }

            //can probably now go ahead and add these to the dataVO...even though further filters may inevitably axe this list or parts of it.
            allTheData.setPhenoCustomData(hashOfPhenosWithTheirPhenoCustomData);
        }

    }
    return idsToInclude;

}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

/**
 * For now this is just forcing all the fields into one new file listing the geno info, given that I believe this whole model will change to include real genetic 
 * analysis going forward, rather than just discussing WHERE the data is and WHAT was done to it.  For now they get all of that info 
 *
 * @param allTheData
 * @param search
 * @param idsAfterFiltering
 * @param maxProcessesPerPipeline 
 */
private Long addGenoData(DataExtractionVO allTheData, Search search, List<Long> idsAfterFiltering,
        Map<Long, Long> maxInputList, Map<Long, Long> maxOutputList, Long maxProcessesPerPipeline) {
    log.info("idsAfterFiltering" + idsAfterFiltering);

    if (!idsAfterFiltering.isEmpty()) {
        //note.  filtering is happening previously...we then do the fetch when we have narrowed down the list of subjects to save a lot of processing
        String queryString = "select lssp from LinkSubjectStudyPipeline lssp "
                + " where lssp.linkSubjectStudy.id in (:idsToInclude) " // stoing this to an lss means we should fetch lss and pipeline..and process
                + " order by lssp.linkSubjectStudy.id ";

        Query query = getSession().createQuery(queryString);
        query.setParameterList("idsToInclude", idsAfterFiltering);
        List<LinkSubjectStudyPipeline> subjectPipelines = query.list();

        List<LinkedExtractionVO> allGenoData = allTheData.getGenoData();
        log.info("count=" + ((subjectPipelines == null) ? "0" : subjectPipelines.size()));

        /* this is putting the data we extracted into a generic kind of VO doc that will be converted to an appropriate format later (such as csv/xls/pdf/xml/etc) */
        for (LinkSubjectStudyPipeline lssp : subjectPipelines) {
            log.info("adding geno info for lss= " + lssp.getLinkSubjectStudy().getId());
            LinkedExtractionVO sev = new LinkedExtractionVO();
            // todo with geno in some way            sev.setKeyValues(constructKeyValueHashmap(lss, personFields, lssFields, addressFields, phoneFields));
            LinkedHashMap<String, String> map = new LinkedHashMap<String, String>();
            sev.setSubjectUid(lssp.getLinkSubjectStudy().getSubjectUID());
            /*
             * 
            public static final String GENO_FIELDS_PIPELINE_ID = "pipelineId";
            public static final String GENO_FIELDS_PIPELINE_NAME = "pipelineName";
            public static final String GENO_FIELDS_PIPELINE_DECSRIPTION = "pipelineDescription";
            public static final String GENO_FIELDS_PROCESS_ID = "processId";
            public static final String GENO_FIELDS_PROCESS_NAME = "processName";
            public static final String GENO_FIELDS_PROCESS_DESCRIPTION = "processDescription";
            public static final String GENO_FIELDS_PROCESS_START_TIME = "startTime";
            public static final String GENO_FIELDS_PROCESS_END_TIME = "endTime";
            public static final String GENO_FIELDS_PROCESS_COMMAND_SERVER_URL = "commandServerUrl";
            public static final String GENO_FIELDS_PROCESS_COMMAND_NAME = "commandName";
            public static final String GENO_FIELDS_PROCESS_COMMAND_LOCATION = "commandLocation";
            //   public static final String GENO_FIELDS_PROCESS_COMMAND_INPUT_FILE_FORMAT;
            //   public static final String GENO_FIELDS_PROCESS_COMMAND_OUTPUT_FILE_FORMAT;
            public static final String GENO_FIELDS_PROCESS_INPUT_SERVER = "inputServer";
            public static final String GENO_FIELDS_PROCESS_INPUT_LOCATION = "inputLocation";
            public static final String GENO_FIELDS_PROCESS_INPUT_FILE_HASH = "inputFileHash";
            public static final String GENO_FIELDS_PROCESS_INPUT_FILE_TYPE = "inputFileType";
            public static final String GENO_FIELDS_PROCESS_INPUT_KEPT = "outputKept";
            public static final String GENO_FIELDS_PROCESS_OUTPUT_SERVER = "outputServer";
            public static final String GENO_FIELDS_PROCESS_OUTPUT_LOCATION = "outputLocation";
            public static final String GENO_FIELDS_PROCESS_OUTPUT_FILE_HASH = "outputFileHash";
            public static final String GENO_FIELDS_PROCESS_OUTPUT_FILE_TYPE = "outputFileType";
            public static final String GENO_FIELDS_PROCESS_OUTPUT_KEPT = "outputKept";*/

            //TODO : NULL CHECK EVERY SINGLE APPROPRIATE PLACE

            //TODO ASAP : change this to do all fields in a precise order (possibly defined somewhere common)
            Pipeline pl = lssp.getPipeline();
            map.put(Constants.GENO_FIELDS_PIPELINE_ID, pl.getId().toString());
            map.put(Constants.GENO_FIELDS_PIPELINE_NAME, pl.getName());
            map.put(Constants.GENO_FIELDS_PIPELINE_DECSRIPTION, pl.getDescription());

            long processIndex = 0L;

            log.info("we have process..." + pl.getPipelineProcesses().size());
            for (Process p : pl.getPipelineProcesses()) {
                processIndex++;
                if (processIndex >= maxProcessesPerPipeline) {
                    log.info("processIndex  maxProcessesPerPipeline = " + processIndex + "  "
                            + maxProcessesPerPipeline);
                    maxProcessesPerPipeline = Long.valueOf(processIndex);
                } else {
                    log.info("processIndex  maxProcessesPerPipeline = " + processIndex + "  "
                            + maxProcessesPerPipeline);
                }

                //TODO : obviously need to prepend the pipeline info/count too
                map.put((Constants.GENO_FIELDS_PROCESS_ID + (processIndex > 1 ? ("_" + processIndex) : "")),
                        p.getId().toString());
                map.put((Constants.GENO_FIELDS_PROCESS_NAME + (processIndex > 1 ? ("_" + processIndex) : "")),
                        p.getName());
                map.put((Constants.GENO_FIELDS_PROCESS_DESCRIPTION
                        + (processIndex > 1 ? ("_" + processIndex) : "")), p.getDescription());
                map.put((Constants.GENO_FIELDS_PROCESS_START_TIME
                        + (processIndex > 1 ? ("_" + processIndex) : "")),
                        p.getStartTime() != null ? p.getStartTime().toLocaleString() : "");
                map.put((Constants.GENO_FIELDS_PROCESS_END_TIME
                        + (processIndex > 1 ? ("_" + processIndex) : "")),
                        p.getEndTime() != null ? p.getEndTime().toLocaleString() : "");
                Command command = p.getCommand();
                map.put((Constants.GENO_FIELDS_PROCESS_COMMAND_NAME
                        + (processIndex > 1 ? ("_" + processIndex) : "")),
                        (command == null ? "" : command.getName()));
                map.put((Constants.GENO_FIELDS_PROCESS_COMMAND_LOCATION
                        + (processIndex > 1 ? ("_" + processIndex) : "")),
                        (command == null ? "" : command.getLocation()));
                map.put((Constants.GENO_FIELDS_PROCESS_COMMAND_SERVER_URL
                        + (processIndex > 1 ? ("_" + processIndex) : "")),
                        (command == null ? "" : command.getServerUrl()));
                //map.put((Constants.GENO_FIELDS_PROCESS_COMMAND_LOCATION + (index>1?("_"+index):"")), (command==null?"":command.getName()));//space keeper for file format info
                //map.put((Constants.GENO_FIELDS_PROCESS_COMMAND_LOCATION + (index>1?("_"+index):"")), (command==null?"":command.getName()));

                Set<ProcessInput> inputs = p.getProcessInputs();
                long inputIndex = 0L;
                for (ProcessInput input : inputs) {
                    inputIndex++;
                    map.put((Constants.GENO_FIELDS_PROCESS_INPUT_SERVER + "_" + processIndex + "_"
                            + inputIndex), (input == null ? "" : input.getInputServer()));
                    map.put((Constants.GENO_FIELDS_PROCESS_INPUT_LOCATION + "_" + processIndex + "_"
                            + inputIndex), (input == null ? "" : input.getinputFileLocation()));
                    map.put((Constants.GENO_FIELDS_PROCESS_INPUT_FILE_HASH + "_" + processIndex + "_"
                            + inputIndex), (input == null ? "" : input.getInputFileHash()));
                    map.put((Constants.GENO_FIELDS_PROCESS_INPUT_FILE_TYPE + "_" + processIndex + "_"
                            + inputIndex), (input == null ? "" : input.getInputFileType()));
                    map.put((Constants.GENO_FIELDS_PROCESS_INPUT_KEPT + "_" + processIndex + "_" + inputIndex),
                            (input == null ? "" : ("" + input.getInputKept())));
                    //TODO ASAP : now put all the input info in with a similar _<index> suffix
                }

                long maxInputCurrent = (maxInputList.get(processIndex) == null) ? 0L
                        : maxInputList.get(processIndex);//get the processIndex'th max input and see if it is bigger than the current input count
                maxInputList.put(processIndex, (maxInputCurrent > inputIndex) ? maxInputCurrent : inputIndex);

                long outputIndex = 0L;
                Set<ProcessOutput> outputs = p.getProcessOutputs();
                for (ProcessOutput output : outputs) {
                    outputIndex++;//TODO ASAP : now put all the output info in with a similar _<index> suffix
                    map.put((Constants.GENO_FIELDS_PROCESS_OUTPUT_SERVER + "_" + processIndex + "_"
                            + outputIndex), (output == null ? "" : output.getOutputServer()));
                    map.put((Constants.GENO_FIELDS_PROCESS_OUTPUT_LOCATION + "_" + processIndex + "_"
                            + outputIndex), (output == null ? "" : output.getOutputFileLocation()));
                    map.put((Constants.GENO_FIELDS_PROCESS_OUTPUT_FILE_HASH + "_" + processIndex + "_"
                            + outputIndex), (output == null ? "" : output.getOutputFileHash()));
                    map.put((Constants.GENO_FIELDS_PROCESS_OUTPUT_FILE_TYPE + "_" + processIndex + "_"
                            + outputIndex), (output == null ? "" : output.getOutputFileType()));
                    map.put((Constants.GENO_FIELDS_PROCESS_OUTPUT_KEPT + "_" + processIndex + "_"
                            + outputIndex), (output == null ? "" : ("" + output.getOutputKept())));
                }

                long maxOutputCurrent = (maxOutputList.get(processIndex) == null) ? 0L
                        : maxOutputList.get(processIndex);//get the processIndex'th max output and see if it is bigger than the current output count
                maxOutputList.put(processIndex,
                        (maxOutputCurrent > outputIndex) ? maxOutputCurrent : outputIndex);

                /*
                public static final String GENO_FIELDS_PROCESS_INPUT_SERVER = "inputServer";
                public static final String GENO_FIELDS_PROCESS_INPUT_LOCATION = "inputLocation";
                public static final String GENO_FIELDS_PROCESS_INPUT_FILE_HASH = "inputFileHash";
                public static final String GENO_FIELDS_PROCESS_INPUT_FILE_TYPE = "inputFileType";
                public static final String GENO_FIELDS_PROCESS_INPUT_KEPT = "outputKept";
                public static final String GENO_FIELDS_PROCESS_OUTPUT_SERVER = "outputServer";
                public static final String GENO_FIELDS_PROCESS_OUTPUT_LOCATION = "outputLocation";
                public static final String GENO_FIELDS_PROCESS_OUTPUT_FILE_HASH = "outputFileHash";
                public static final String GENO_FIELDS_PROCESS_OUTPUT_FILE_TYPE = "outputFileType";
                public static final String GENO_FIELDS_PROCESS_OUTPUT_KEPT = "outputKept";
                */
                //map.put((Constants.GENO_FIELDS_PROCESS_INPUT_KEPT + (index>1?("_"+index):"")), p.getId());
            }
            sev.setKeyValues(map);
            allGenoData.add(sev);
        }

    }
    return maxProcessesPerPipeline;
}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

/**
 * 
 * @param allTheData
 * @param personFields
 * @param lssFields
 * @param addressFields
 * @param phoneFields
 * @param otherIDFields
 * @param subjectCFDs
 * @param search
 * @param idsAfterFiltering
 */
private void addDataFromMegaDemographicQuery(DataExtractionVO allTheData,
        Collection<DemographicField> personFields, Collection<DemographicField> lssFields,
        Collection<DemographicField> addressFields, Collection<DemographicField> phoneFields,
        Collection<DemographicField> otherIDFields, Collection<DemographicField> linkSubjectTwinsFields,
        Collection<CustomFieldDisplay> subjectCFDs, Search search, List<Long> idsAfterFiltering) {
    log.info("in addDataFromMegaDemographicQuery"); //if no id's, no need to run this
    if ((!lssFields.isEmpty() || !personFields.isEmpty() || !addressFields.isEmpty() || !phoneFields.isEmpty()
            || !linkSubjectTwinsFields.isEmpty() || !subjectCFDs.isEmpty()) && !idsAfterFiltering.isEmpty()) { // hasEmailFields(dfs)
        //note.  filtering is happening previously...we then do the fetch when we have narrowed down the list of subjects to save a lot of processing
        String queryString = "select distinct lss " // , address, lss, email " +
                + " from LinkSubjectStudy lss "
                + ((!personFields.isEmpty()) ? " left join fetch lss.person person " : "")
                + ((!addressFields.isEmpty()) ? " left join lss.person.addresses a " : "")
                + ((!phoneFields.isEmpty()) ? " left join lss.person.phones p " : "")
                + ((!linkSubjectTwinsFields.isEmpty())
                        ? " left join lss.linkSubjectTwinsAsFirstSubject lstAsFirst  "
                        : "")
                + ((!linkSubjectTwinsFields.isEmpty())
                        ? " left join lss.linkSubjectTwinsAsSecondSubject lstAsSecond  "
                        : "")
                + " where lss.study.id = " + search.getStudy().getId() + " and lss.id in (:idsToInclude) "
                + " order by lss.subjectUID";

        Query query = getSession().createQuery(queryString);
        query.setParameterList("idsToInclude", idsAfterFiltering);
        List<LinkSubjectStudy> subjects = query.list();

        QueryTranslatorFactory translatorFactory = new ASTQueryTranslatorFactory();
        SessionFactoryImplementor factory = (SessionFactoryImplementor) getSession().getSessionFactory();
        QueryTranslator translator = translatorFactory.createQueryTranslator(query.getQueryString(),
                query.getQueryString(), Collections.EMPTY_MAP, factory);
        translator.compile(Collections.EMPTY_MAP, false);
        log.info(translator.getSQLString());

        // DataExtractionVO devo; = new DataExtractionVO();
        HashMap<String, ExtractionVO> hashOfSubjectsWithTheirDemographicData = allTheData.getDemographicData();

        /* this is putting the data we extracted into a generic kind of VO doc that will be converted to an appropriate format later (such as csv/xls/pdf/xml/etc) */
        for (LinkSubjectStudy lss : subjects) {
            ExtractionVO sev = new ExtractionVO();
            sev.setKeyValues(constructKeyValueHashmap(lss, personFields, lssFields, addressFields, phoneFields,
                    otherIDFields, linkSubjectTwinsFields));
            hashOfSubjectsWithTheirDemographicData.put(lss.getSubjectUID(), sev);
        }

    }
}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

private List<Long> addDataFromMegaBiocollectionQuery(DataExtractionVO allTheData,
        Collection<BiocollectionField> biocollectionFields, Collection<CustomFieldDisplay> collectionCFDs,
        Search search, List<Long> idsToInclude, List<Long> biocollectionIdsAfterFiltering) {
    String bioCollectionFilters = getBiocollectionFilters(search);

    Collection<BioCollection> bioCollectionList = Collections.EMPTY_LIST;

    if (biocollectionFields.isEmpty() && bioCollectionFilters.isEmpty()) {
        if (idsToInclude.isEmpty()) {
            // no need - skip querying
        } else {
            biocollectionIdsAfterFiltering = getBioCollectionIdForSubjectIds(idsToInclude);
            if (!biocollectionIdsAfterFiltering.isEmpty()) {
                bioCollectionList = getSession().createCriteria(BioCollection.class)
                        .add(Restrictions.in("id", biocollectionIdsAfterFiltering)).list();
            }
        }
    }

    if (!idsToInclude.isEmpty() && biocollectionIdsAfterFiltering.isEmpty()
            && (!bioCollectionFilters.isEmpty() || !biocollectionFields.isEmpty())) {

        StringBuffer queryBuffer = new StringBuffer("select distinct biocollection ");
        queryBuffer.append("from BioCollection biocollection "); //   TODO:  improve preformance by prefetch
        queryBuffer.append(" where biocollection.study.id = " + search.getStudy().getId());

        if (!bioCollectionFilters.isEmpty()) {
            queryBuffer.append(bioCollectionFilters);
        }
        queryBuffer.append("  and biocollection.linkSubjectStudy.id in (:idsToInclude) ");

        Query query = getSession().createQuery(queryBuffer.toString());
        query.setParameterList("idsToInclude", idsToInclude);
        bioCollectionList = query.list();
    }
    HashSet<Long> uniqueSubjectIDs = new HashSet<Long>();
    HashMap<String, ExtractionVO> hashOfBioCollectionData = allTheData.getBiocollectionData();

    for (BioCollection bioCollection : bioCollectionList) {
        ExtractionVO sev = new ExtractionVO();
        sev.setKeyValues(constructKeyValueHashmap(bioCollection, biocollectionFields));
        hashOfBioCollectionData.put(bioCollection.getBiocollectionUid(), sev);
        uniqueSubjectIDs.add(bioCollection.getLinkSubjectStudy().getId());
        sev.setSubjectUid(bioCollection.getLinkSubjectStudy().getSubjectUID()); //TODO: now that we have this, probably need to fetch join to save us a bunch of hits to the db
        biocollectionIdsAfterFiltering.add(bioCollection.getId());
    }

    //maintaining list of subject IDs for filtering past results
    if (!bioCollectionFilters.isEmpty()) {
        idsToInclude = new ArrayList<Long>(uniqueSubjectIDs);
    }
    return biocollectionIdsAfterFiltering;
}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

private List<Long> addDataFromMegaBiospecimenQuery(DataExtractionVO allTheData,
        Collection<BiospecimenField> biospecimenFields, //Collection<CustomFieldDisplay> specimenCFDs, 
        Search search, List<Long> idsToInclude, List<Long> biospecimenIdsAfterFiltering,
        List<Long> bioCollectionIdsAfterFiltering) {

    String biospecimenFilters = getBiospecimenFilters(search);

    HashMap<String, ExtractionVO> hashOfBiospecimenData = allTheData.getBiospecimenData();

    Collection<Biospecimen> biospecimenList = Collections.EMPTY_LIST; // avoids an NPE if neither branch below assigns a list

    if ((biospecimenFields.isEmpty() && biospecimenFilters.isEmpty())) {
        if (idsToInclude.isEmpty()) {
            // no need
        } else {
            biospecimenIdsAfterFiltering = getBiospecimenIdForSubjectIds(idsToInclude);
            if (biospecimenIdsAfterFiltering.isEmpty()) {
                return Collections.EMPTY_LIST;
            } else {
                biospecimenList = getSession().createCriteria(Biospecimen.class)
                        .add(Restrictions.in("id", biospecimenIdsAfterFiltering)).list();
            }
        }
    } else if ((!biospecimenFields.isEmpty() || !biospecimenFilters.isEmpty()) && !idsToInclude.isEmpty()) {

        StringBuffer queryBuffer = new StringBuffer("select distinct biospecimen ");
        queryBuffer.append("from Biospecimen biospecimen ");
        queryBuffer.append("    left join fetch biospecimen.sampleType sampleType ");
        queryBuffer.append("   left join fetch biospecimen.invCell invCell "); //Not lookup compatible
        queryBuffer.append("   left join fetch biospecimen.storedIn storedIn ");
        queryBuffer.append("   left join fetch biospecimen.grade grade ");
        queryBuffer.append("   left join fetch biospecimen.species species ");
        queryBuffer.append("   left join fetch biospecimen.unit unit ");
        queryBuffer.append("   left join fetch biospecimen.treatmentType treatmentType ");
        queryBuffer.append("   left join fetch biospecimen.quality quality ");
        queryBuffer.append("   left join fetch biospecimen.anticoag anticoag ");
        queryBuffer.append("   left join fetch biospecimen.status status ");
        queryBuffer.append("   left join fetch biospecimen.biospecimenProtocol biospecimenProtocol ");
        queryBuffer.append("   left join fetch biospecimen.bioCollection biocollection ");
        queryBuffer.append(" where biospecimen.study.id = " + search.getStudy().getId());
        if (!biospecimenFilters.isEmpty()) {
            queryBuffer.append(biospecimenFilters);
        }

        queryBuffer.append("  and biospecimen.linkSubjectStudy.id in (:idsToInclude) ");

        if (!bioCollectionIdsAfterFiltering.isEmpty()) {
            queryBuffer.append("  and biospecimen.bioCollection.id in (:biocollectionsToFilter) ");
        } else {
            biospecimenIdsAfterFiltering = new ArrayList<Long>();
            return new ArrayList<Long>();
        }

        Query query = getSession().createQuery(queryBuffer.toString());
        query.setParameterList("idsToInclude", idsToInclude);
        if (!bioCollectionIdsAfterFiltering.isEmpty()) {
            query.setParameterList("biocollectionsToFilter", bioCollectionIdsAfterFiltering);
        }

        biospecimenList = query.list();
    }
    HashSet<Long> uniqueSubjectIDs = new HashSet<Long>();
    for (Biospecimen biospecimen : biospecimenList) {
        ExtractionVO sev = new ExtractionVO();
        sev.setKeyValues(constructKeyValueHashmap(biospecimen, biospecimenFields));
        sev.setSubjectUid(biospecimen.getLinkSubjectStudy().getSubjectUID());
        hashOfBiospecimenData.put(biospecimen.getBiospecimenUid(), sev);
        uniqueSubjectIDs.add(biospecimen.getLinkSubjectStudy().getId());
        biospecimenIdsAfterFiltering.add(biospecimen.getId());
    }

    //maintaining list of subject IDs for filtering past results
    if (!biospecimenFilters.isEmpty()) {

        idsToInclude.clear();
        for (Object id : uniqueSubjectIDs) {
            idsToInclude.add((Long) id);
        }
        log.info("LATEST LIST OF IDS SIZE=" + idsToInclude.size());
    }
    allTheData.setBiospecimenData(hashOfBiospecimenData);//wouldn't think I need to set this
    //log.info("addDataFromMegaBiospecimenQuery.biospecimenIdsAfterFiltering: " + biospecimenIdsAfterFiltering.size());
    return biospecimenIdsAfterFiltering;
}