Usage examples for org.hibernate.Criteria.list()
public List list() throws HibernateException;
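Criteria.list() executes the assembled criteria query and returns all matching results. Because the method is declared to return a raw List, typed callers usually cast the result or suppress the unchecked warning. Below is a minimal sketch of the common pattern, assuming a hypothetical mapped entity Person with a lastName property; the real-world examples that follow are all drawn from the Ark project's StudyDao.

import java.util.List;

import org.hibernate.Criteria;
import org.hibernate.Session;
import org.hibernate.criterion.Order;
import org.hibernate.criterion.Restrictions;

public class CriteriaListSketch {
    // "Person" and its "lastName" property are hypothetical names, for illustration only
    @SuppressWarnings("unchecked") // Criteria.list() returns a raw List
    public List<Person> findByLastName(Session session, String lastName) {
        Criteria criteria = session.createCriteria(Person.class);
        criteria.add(Restrictions.eq("lastName", lastName)); // WHERE lastName = ?
        criteria.addOrder(Order.asc("id"));                  // ORDER BY id
        return criteria.list(); // runs the query; empty list if nothing matches
    }
}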
From source file:au.org.theark.core.dao.StudyDao.java
License:Open Source License
public Collection<BiospecimenField> getAllBiospecimenFields() {
    Criteria criteria = getSession().createCriteria(BiospecimenField.class);
    return criteria.list();
}
From source file:au.org.theark.core.dao.StudyDao.java
License:Open Source License
public Collection<BiocollectionField> getAllBiocollectionFields() {
    Criteria criteria = getSession().createCriteria(BiocollectionField.class);
    return criteria.list();
}
From source file:au.org.theark.core.dao.StudyDao.java
License:Open Source License
public void runSearch(Long searchId, String currentUser) {
    DataExtractionVO allTheData = new DataExtractionVO();
    Search search = (Search) getSession().get(Search.class, searchId);
    if (search == null) {
        // TODO errors and reports
    } else {
        //getMaxAddressesForTheseSubjects(null, null); chris doesn't need this right now
        List<DemographicField> addressDFs = getSelectedDemographicFieldsForSearch(search, Entity.Address);
        List<DemographicField> lssDFs = getSelectedDemographicFieldsForSearch(search, Entity.LinkSubjectStudy);
        List<DemographicField> personDFs = getSelectedDemographicFieldsForSearch(search, Entity.Person);
        List<DemographicField> phoneDFs = getSelectedDemographicFieldsForSearch(search, Entity.Phone);
        List<DemographicField> otherIDDFs = getSelectedDemographicFieldsForSearch(search, Entity.OtherID);
        // added 2015-11-03: include the twin subject uid and the twin type
        List<DemographicField> twinDetailDFs = getSelectedDemographicFieldsForSearch(search, Entity.LinkSubjectTwin);

        List<DemographicField> allSubjectFields = new ArrayList<DemographicField>();
        allSubjectFields.addAll(addressDFs);
        allSubjectFields.addAll(lssDFs);
        allSubjectFields.addAll(personDFs);
        allSubjectFields.addAll(phoneDFs);
        allSubjectFields.addAll(twinDetailDFs);

        List<BiospecimenField> bsfs = getSelectedBiospecimenFieldsForSearch(search);
        List<BiocollectionField> bcfs = getSelectedBiocollectionFieldsForSearch(search);
        List<CustomFieldDisplay> bccfds = getSelectedBiocollectionCustomFieldDisplaysForSearch(search);
        List<CustomFieldDisplay> bscfds = getSelectedBiospecimenCustomFieldDisplaysForSearch(search);
        List<CustomFieldDisplay> scfds = getSelectedSubjectCustomFieldDisplaysForSearch(search);
        List<PhenoDataSetFieldDisplay> pfds = getSelectedPhenoDataSetFieldDisplaysForSearch(search);
        List<ConsentStatusField> consentStatus = (List<ConsentStatusField>) getSelectedConsentStatusFieldsForSearch(search);

        /* Making this stuff into an xml document THEN converting it generically to xls/csv/pdf/etc might be an option.
         * Other options:
         *   1. get each of these and apply a filter every time
         *   2. a megaquery to get EVERYTHING FOR EVERYONE into our "report/value object/model"
         *   3. use the filters to create a set of subjectUIDs and maybe apply that, though it may
         *      also need a set of pheno_data ids, subj_custom ids, etc.
         */

        // DEMOGRAPHIC FILTERING - but not data
        List<Long> idsAfterFiltering = applyDemographicFilters(search);
        log.info("uids after demographic filtering=" + idsAfterFiltering.size());

        // CONSENT STATUS FILTERING - still being worked on, not complete... but doesn't break anything
        if (!idsAfterFiltering.isEmpty()) {
            idsAfterFiltering = applyConsentStatusFilters(allTheData, search, idsAfterFiltering);
        }
        log.info("uids after consent status filtering=" + idsAfterFiltering.size());

        // BIOCOLLECTION
        List<Long> bioCollectionIdsAfterFiltering = new ArrayList<Long>();
        if (!idsAfterFiltering.isEmpty()) {
            bioCollectionIdsAfterFiltering = addDataFromMegaBiocollectionQuery(allTheData, bcfs, bccfds,
                    search, idsAfterFiltering, bioCollectionIdsAfterFiltering);
        }
        log.info("uids after biocollection query=" + idsAfterFiltering.size());

        // BIOCOLLECTION CUSTOM
        if (!idsAfterFiltering.isEmpty()) {
            // change will be applied to the referenced object
            idsAfterFiltering = applyBioCollectionCustomFilters(allTheData, search, idsAfterFiltering,
                    bioCollectionIdsAfterFiltering);
        }
        log.info("uids after biocollection custom filtering=" + idsAfterFiltering.size());

        // BIOSPECIMEN
        List<Long> biospecimenIdsAfterFiltering = new ArrayList<Long>();
        if (!idsAfterFiltering.isEmpty()) {
            biospecimenIdsAfterFiltering = addDataFromMegaBiospecimenQuery(allTheData, bsfs, search,
                    idsAfterFiltering, biospecimenIdsAfterFiltering, bioCollectionIdsAfterFiltering);
        }
        log.info("biospecimenIdsAfterFiltering size=" + biospecimenIdsAfterFiltering.size());
        log.info("uids after biospecimen query=" + idsAfterFiltering.size());

        // BIOSPECIMEN CUSTOM
        if (!idsAfterFiltering.isEmpty()) {
            // change will be applied to the referenced object
            idsAfterFiltering = applyBiospecimenCustomFilters(allTheData, search, idsAfterFiltering,
                    biospecimenIdsAfterFiltering);
        }
        log.info("uids after biospecimen custom filtering=" + idsAfterFiltering.size()
                + ", biospecimenIdsAfterFiltering size=" + biospecimenIdsAfterFiltering.size());

        // PHENO CUSTOM
        if (!idsAfterFiltering.isEmpty()) {
            // change will be applied to the referenced object
            idsAfterFiltering = applyPhenoDataSetFilters(allTheData, search, idsAfterFiltering);
        }
        log.info("uids after pheno custom filtering=" + idsAfterFiltering.size());

        // DEMOGRAPHIC DATA
        idsAfterFiltering = applySubjectCustomFilters(allTheData, search, idsAfterFiltering);
        wipeBiospecimenDataNotMatchingThisList(search.getStudy(), allTheData, biospecimenIdsAfterFiltering,
                bioCollectionIdsAfterFiltering, idsAfterFiltering);
        wipeBiocollectionDataNotMatchThisList(search.getStudy(), allTheData, bioCollectionIdsAfterFiltering,
                idsAfterFiltering, biospecimenIdsAfterFiltering, getBiospecimenQueryFilters(search));
        // 2015-11-03: include the twin subject uid with the twin type after the otherIDDFs.
        // This must go last, as the number of joining tables is going to affect performance.
        addDataFromMegaDemographicQuery(allTheData, personDFs, lssDFs, addressDFs, phoneDFs, otherIDDFs,
                twinDetailDFs, scfds, search, idsAfterFiltering);
        log.info("uids after subject custom filtering=" + idsAfterFiltering.size());

        // pass the index and do a max comparison to minimize a simple grid that would otherwise be too bulky
        Map<Long, Long> maxInputList = new HashMap<Long, Long>();
        Map<Long, Long> maxOutputList = new HashMap<Long, Long>();
        Long maxProcessesPerPipeline = Long.valueOf(0L);
        if (search.getIncludeGeno()) {
            maxProcessesPerPipeline = addGenoData(allTheData, search, idsAfterFiltering, maxInputList,
                    maxOutputList, maxProcessesPerPipeline); // TODO: test
        }

        prettyLoggingOfWhatIsInOurMegaObject(allTheData.getDemographicData(), FieldCategory.DEMOGRAPHIC_FIELD);
        prettyLoggingOfWhatIsInOurMegaObject(allTheData.getSubjectCustomData(), FieldCategory.SUBJECT_CFD);
        prettyLoggingOfWhatIsInOurMegaObject(allTheData.getBiospecimenData(), FieldCategory.BIOSPECIMEN_FIELD);
        prettyLoggingOfWhatIsInOurMegaObject(allTheData.getBiospecimenData(), FieldCategory.BIOCOLLECTION_FIELD);
        // prettyLoggingOfWhatIsInOurMegaObject(allTheData.getConsentStatusData(), FieldCategory.CONSENT_STATUS_FIELD);

        // CREATE CSVs - later we will offer other options: xml, pdf, etc.
        // First delete any previous results for this search.
        Criteria criteria = getSession().createCriteria(SearchResult.class);
        criteria.add(Restrictions.eq("search", search));
        List<SearchResult> searchResults = criteria.list();
        for (SearchResult sr : searchResults) {
            deleteSearchResult(sr);
        }
        createSearchResult(search, iDataExtractionDao.createSubjectDemographicCSV(search, allTheData,
                allSubjectFields, scfds, FieldCategory.DEMOGRAPHIC_FIELD), currentUser);
        createSearchResult(search, iDataExtractionDao.createBiocollectionCSV(search, allTheData, bccfds,
                FieldCategory.BIOCOLLECTION_FIELD), currentUser);
        createSearchResult(search, iDataExtractionDao.createBiospecimenCSV(search, allTheData, bsfs, bscfds,
                FieldCategory.BIOSPECIMEN_FIELD), currentUser);
        createSearchResult(search, iDataExtractionDao.createPhenotypicCSV(search, allTheData, pfds,
                FieldCategory.PHENO_FD), currentUser);
        if (search.getIncludeGeno()) {
            createSearchResult(search, iDataExtractionDao.createGenoCSV(search, allTheData, FieldCategory.GENO,
                    maxProcessesPerPipeline, maxInputList, maxOutputList), currentUser);
        }
        createSearchResult(search, iDataExtractionDao.createConsentStatusCSV(search, allTheData, consentStatus,
                FieldCategory.CONSENT_STATUS_FIELD), currentUser);
        createSearchResult(search, iDataExtractionDao.createMegaCSV(search, allTheData, allSubjectFields,
                bccfds, bscfds, pfds, consentStatus), currentUser);
        try {
            search.setFinishTime(new java.util.Date(System.currentTimeMillis()));
            search.setStatus("FINISHED");
            update(search);
        } catch (EntityExistsException e) {
            // TODO extractions should carry a status so users can see what happened on failure
            log.error("Error while updating search with finish time.", e);
        }
    }
}
From source file:au.org.theark.core.dao.StudyDao.java
License:Open Source License
private List<Long> applyConsentStatusFilters(DataExtractionVO allTheData, Search search, List<Long> idsToInclude) {
    boolean hasConsentFilters = false;
    if (search.getQueryFilters().isEmpty()) {
        return idsToInclude;
    } else {
        for (QueryFilter filter : search.getQueryFilters()) {
            if (filter.getConsentStatusField() != null) {
                hasConsentFilters = true;
            }
        }
    }
    Criteria filter = getSession().createCriteria(Consent.class, "c");
    filter.add(Restrictions.eq("c.study.id", search.getStudy().getId()));
    filter.createAlias("c.linkSubjectStudy", "lss");
    if (!idsToInclude.isEmpty()) {
        filter.add(Restrictions.in("lss.id", idsToInclude));
    }
    filter.createAlias("c.studyComponentStatus", "cscs");
    filter.createAlias("c.studyComp", "csc");
    // NB: consent-status filtering was still a work in progress (see runSearch). With this guard
    // the restrictions below are never actually applied: the inner check requires a consent status
    // field, which can only be present when hasConsentFilters is true.
    if (!hasConsentFilters) {
        for (QueryFilter qf : search.getQueryFilters()) {
            if (qf.getConsentStatusField() != null) {
                switch (qf.getOperator()) {
                case EQUAL:
                    filter.add(Restrictions.eq(getConsentFilterFieldName(qf), qf.getValue()));
                    break;
                case BETWEEN:
                    filter.add(Restrictions.between(getConsentFilterFieldName(qf), qf.getValue(), qf.getSecondValue()));
                    break;
                case GREATER_THAN:
                    filter.add(Restrictions.gt(getConsentFilterFieldName(qf), qf.getValue()));
                    break;
                case GREATER_THAN_OR_EQUAL:
                    filter.add(Restrictions.ge(getConsentFilterFieldName(qf), qf.getValue()));
                    break;
                case IS_EMPTY:
                    filter.add(Restrictions.isEmpty(getConsentFilterFieldName(qf)));
                    break;
                case IS_NOT_EMPTY:
                    filter.add(Restrictions.isNotEmpty(getConsentFilterFieldName(qf)));
                    break;
                case LESS_THAN:
                    filter.add(Restrictions.lt(getConsentFilterFieldName(qf), qf.getValue()));
                    break;
                case LESS_THAN_OR_EQUAL:
                    filter.add(Restrictions.le(getConsentFilterFieldName(qf), qf.getValue()));
                    break;
                case LIKE:
                    filter.add(Restrictions.like(getConsentFilterFieldName(qf), qf.getValue(), MatchMode.ANYWHERE));
                    break;
                case NOT_EQUAL:
                    filter.add(Restrictions.ne(getConsentFilterFieldName(qf), qf.getValue()));
                    break;
                default:
                    break;
                }
            }
        }
    }
    filter.setProjection(Projections.distinct(Projections.projectionList().add(Projections.property("lss.id"))));
    List<Long> consentStatusIDs = filter.list();
    Collection<Consent> csData = Collections.emptyList();
    if (!consentStatusIDs.isEmpty()) {
        Criteria consentData = getSession().createCriteria(Consent.class, "c");
        consentData.add(Restrictions.eq("c.study.id", search.getStudy().getId()));
        consentData.createAlias("c.linkSubjectStudy", "lss");
        consentData.add(Restrictions.in("lss.id", consentStatusIDs));
        csData = consentData.list();
    }
    HashMap<String, ExtractionVO> hashOfConsentStatusData = allTheData.getConsentStatusData();
    ExtractionVO valuesForThisLss = new ExtractionVO();
    HashMap<String, String> map = null;
    LinkSubjectStudy previousLss = null;
    int count = 0;
    // results are ordered, so we can compare each row to the previous LSS and either add to
    // the current ExtractionVO or start a new one
    for (Consent data : csData) {
        if (previousLss == null) {
            map = new HashMap<String, String>();
            previousLss = data.getLinkSubjectStudy();
            count = 0;
        } else if (data.getLinkSubjectStudy().getId().equals(previousLss.getId())) {
            // same subject: just keep adding to the current map
            count++;
        } else {
            // a new LSS: finalize the previous map and start afresh
            valuesForThisLss.setKeyValues(map);
            valuesForThisLss.setSubjectUid(previousLss.getSubjectUID());
            hashOfConsentStatusData.put(previousLss.getSubjectUID(), valuesForThisLss);
            previousLss = data.getLinkSubjectStudy();
            map = new HashMap<String, String>();
            valuesForThisLss = new ExtractionVO();
            count = 0;
        }
        if (data.getStudyComp().getName() != null) {
            map.put(count + "_Study Component Name", data.getStudyComp().getName());
        }
        if (data.getStudyComponentStatus() != null) {
            map.put(count + "_Study Component Status", data.getStudyComponentStatus().getName());
        }
        if (data.getConsentDate() != null) {
            map.put(count + "_Consent Date", data.getConsentDate().toString());
        }
        if (data.getConsentedBy() != null) {
            map.put(count + "_Consented By", data.getConsentedBy());
        }
    }
    // finalize the last key/value set and its ExtractionVO
    if (map != null && previousLss != null) {
        valuesForThisLss.setKeyValues(map);
        valuesForThisLss.setSubjectUid(previousLss.getSubjectUID());
        hashOfConsentStatusData.put(previousLss.getSubjectUID(), valuesForThisLss);
    }
    // add these to the data VO now, even though later filters may still cut this list down
    allTheData.setConsentStatusData(hashOfConsentStatusData);
    if (hasConsentFilters) {
        return consentStatusIDs;
    } else {
        return idsToInclude;
    }
}
From source file:au.org.theark.core.dao.StudyDao.java
License:Open Source License
public List<QueryFilterVO> getQueryFilterVOs(Search search) {
    List<QueryFilterVO> filterVOs = new ArrayList<QueryFilterVO>();
    Criteria criteria = getSession().createCriteria(QueryFilter.class);
    if (search != null && search.getId() != null) {
        criteria.add(Restrictions.eq("search", search));
        List<QueryFilter> filters = criteria.list();
        for (QueryFilter filter : filters) {
            QueryFilterVO filterVO = new QueryFilterVO();
            filterVO.setQueryFilter(filter);
            if (filter.getDemographicField() != null) {
                filterVO.setFieldCategory(FieldCategory.DEMOGRAPHIC_FIELD);
            } else if (filter.getBiocollectionField() != null) {
                filterVO.setFieldCategory(FieldCategory.BIOCOLLECTION_FIELD);
            } else if (filter.getBiospecimenField() != null) {
                filterVO.setFieldCategory(FieldCategory.BIOSPECIMEN_FIELD);
            } else if (filter.getCustomFieldDisplay() != null) {
                filterVO.setFieldCategory(getFieldCategoryFor(filter.getCustomFieldDisplay().getCustomField().getArkFunction()));
            } else if (filter.getConsentStatusField() != null) {
                filterVO.setFieldCategory(FieldCategory.CONSENT_STATUS_FIELD);
            }
            filterVOs.add(filterVO);
        }
    }
    return filterVOs;
}
From source file:au.org.theark.core.dao.StudyDao.java
License:Open Source License
public List<Study> getParentAndChildStudies(Long id) {
    Criteria studyCriteria = getSession().createCriteria(Study.class);
    Study study = getStudy(id);
    if (study.getStudyStatus() != null) {
        studyCriteria.add(Restrictions.eq(Constants.STUDY_STATUS, study.getStudyStatus()));
    }
    try {
        // in either case, exclude archived studies
        StudyStatus status = getStudyStatus("Archive");
        studyCriteria.add(Restrictions.ne(Constants.STUDY_STATUS, status));
    } catch (StatusNotAvailableException notAvailable) {
        log.error("Cannot look up and filter on archive status. Reference data could be missing");
    }
    if (study.getParentStudy() != null && !study.getParentStudy().equals(study)) {
        studyCriteria.add(Restrictions.or(Restrictions.idEq(id),
                Restrictions.eq("parentStudy", study.getParentStudy())));
    } else {
        studyCriteria.add(Restrictions.or(Restrictions.idEq(id), Restrictions.eq("parentStudy", study)));
    }
    studyCriteria.addOrder(Order.asc("id"));
    studyCriteria.addOrder(Order.asc(Constants.STUDY_NAME));
    return studyCriteria.list();
}
From source file:au.org.theark.core.dao.StudyDao.java
License:Open Source License
public List<SearchResult> getSearchResultList(Long searchResultId) {
    // note: despite its name, the parameter is matched against the id of the parent Search
    Criteria criteria = getSession().createCriteria(SearchResult.class);
    criteria.add(Restrictions.eq("search.id", searchResultId));
    return criteria.list();
}
From source file:au.org.theark.core.dao.StudyDao.java
License:Open Source License
public List<Relationship> getFamilyRelationships() {
    Criteria criteria = getSession().createCriteria(Relationship.class);
    return criteria.list();
}
From source file:au.org.theark.core.dao.StudyDao.java
License:Open Source License
public List<SearchSubject> getSearchSubjects() {
    Criteria criteria = getSession().createCriteria(SearchSubject.class);
    return criteria.list();
}
From source file:au.org.theark.core.dao.StudyDao.java
License:Open Source License
public List<Long> getSubjectIdsforSearch(Search search) {
    Criteria criteria = getSession().createCriteria(SearchSubject.class);
    criteria.add(Restrictions.eq("search", search));
    // project just the linkSubjectStudy ids rather than whole SearchSubject entities
    criteria.setProjection(Projections.property("linkSubjectStudy.id"));
    return criteria.list();
}