Example usage for org.hibernate ScrollableResults next

Introduction

This page collects example usages of org.hibernate.ScrollableResults.next() from the source files listed below.

Prototype

boolean next();

Document

Advance to the next result; returns true if there is another result to read.
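
For orientation, here is a minimal sketch of the canonical next() loop in the style of the examples below; the Person entity and the HQL string are hypothetical, and an open org.hibernate.Session is assumed:

// Stream results forward-only and advance row by row with next().
// Person and the HQL string are hypothetical; "session" is an open org.hibernate.Session.
ScrollableResults results = session.createQuery("FROM Person ORDER BY id")
        .setReadOnly(true)
        .scroll(ScrollMode.FORWARD_ONLY);
try {
    while (results.next()) {                // false once the cursor is exhausted
        Person p = (Person) results.get(0); // get(i) reads column i of the current row
        // ... process p ...
        session.evict(p);                   // keep the persistence context small while streaming
    }
} finally {
    results.close();                        // always release the underlying database cursor
}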

Usage

From source file:de.tudarmstadt.ukp.lmf.api.UbyStatistics.java

License:Apache License

/**
 * Return a {@link Set} of {@link String} instances consisting of <code>lemma+"_"+part-of-speech</code>,
 *       filtered by given {@link Lexicon} name.<br>
 * The lemma is obtained from the written form of the first {@link FormRepresentation} of the {@link Lemma}
 * instance.
 * @param lexiconName
 *          name of the lexicon whose lemmas should be used
 * 
 * @return a set of strings containing lemma and part-of-speech of the specified lexicon.<br>
 * This method returns an empty set if the lexicon with the specified name does not exist.
 * 
 * @see Lemma#getFormRepresentations()
 * @see FormRepresentation#getWrittenForm()
 * @see EPartOfSpeech
 */
public Set<String> getLemmaPosPerLexicon(String lexiconName) {
    Criteria criteria = session.createCriteria(Lexicon.class, "l");
    criteria = criteria.createCriteria("lexicalEntries", "e");
    if (lexiconName != null) {
        criteria = criteria.add(Restrictions.eq("l.name", lexiconName));
    }
    criteria = criteria.createCriteria("lemma").createCriteria("formRepresentations", "f")
            .setProjection(Projections.projectionList().add(Property.forName("f.writtenForm"))
                    .add(Property.forName("e.partOfSpeech")));
    ScrollableResults res = criteria.scroll();
    ArrayList<String> out = new ArrayList<String>();
    while (res.next()) {
        Object[] r = res.get();
        if (r[1] != null) { // some resources do not have POS
            out.add((String) r[0] + "_" + ((EPartOfSpeech) r[1]).toString());
        } else {
            out.add((String) r[0] + "_null");
        }

    }
    HashSet<String> out2 = new HashSet<String>(out);
    return out2;
}

From source file:de.tudarmstadt.ukp.lmf.api.UbyStatistics.java

License:Apache License

/**
 * Return a {@link Set} of {@link String} instances consisting of <code>lemma+"_"+part-of-speech</code>,
 *       filtered by given {@link Lexicon} name, part-of-speech prefix and a language identifier.<br>
 * The lemma is obtained from the written form of the first {@link FormRepresentation} of the {@link Lemma}
 * instance.
 * 
 * @param lexiconName
 *          name of the lexicon whose lemmas should be used
 * 
 * @param prefix the part-of-speech prefix used when filtering {@link LexicalEntry} instances
 * 
 * @param lang the language identifier used when filtering lexical entries
 * 
 * @return a set of strings containing lemma and part-of-speech of the specified lexicon.<br>
 * 
 * This method returns an empty set if the lexicon with the specified name does not exist or
 * the lexicon does not contain any lexical entries with specified part-of-speech prefix and language
 * identifier.
 * 
 * @see Lemma#getFormRepresentations()
 * @see FormRepresentation#getWrittenForm()
 * @see EPartOfSpeech
 * @see ELanguageIdentifier
 */
public Set<String> getLemmaPosPerLexiconAndPosPrefixAndLanguage(String lexiconName, String prefix,
        String lang) {
    Criteria criteria = session.createCriteria(Lexicon.class, "l");

    criteria = criteria.createCriteria("lexicalEntries", "e");
    if (lexiconName != null) {
        criteria = criteria.add(Restrictions.eq("l.name", lexiconName));
    }
    if (lang != null) {
        criteria = criteria.add(Restrictions.eq("l.languageIdentifier", lang));
    }
    if (prefix != null) {
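        // note: concatenating the prefix into raw SQL is prone to SQL injection; the
        // parameterized overload sqlRestriction("partOfSpeech like ?", prefix, type) would be safer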
        criteria = criteria.add(Restrictions.sqlRestriction("partOfSpeech like '" + prefix + "'"));
    }
    criteria = criteria.createCriteria("lemma").createCriteria("formRepresentations", "f")
            .setProjection(Projections.projectionList().add(Property.forName("f.writtenForm"))
                    .add(Property.forName("e.partOfSpeech")));
    ScrollableResults res = criteria.scroll();
    ArrayList<String> out = new ArrayList<String>();
    while (res.next()) {
        Object[] r = res.get();
        if (r[1] != null) {
            out.add((String) r[0] + "_" + ((EPartOfSpeech) r[1]).toString());
        } else {
            out.add((String) r[0] + "_null");
        }
    }
    HashSet<String> out2 = new HashSet<String>(out);
    return out2;

}

From source file:de.tudarmstadt.ukp.lmf.transform.DBToXMLTransformer.java

License:Apache License

protected void doTransform(boolean includeAxes, final Lexicon... includeLexicons) throws SAXException {
    final int bufferSize = 100;
    commitCounter = 1;

    writeStartElement(lexicalResource);

    // Iterate over all lexicons
    if (includeLexicons == null || includeLexicons.length > 0) {
        for (Lexicon lexicon : lexicalResource.getLexicons()) {
            String lexiconName = lexicon.getName();

            // Check if we want to include this lexicon.
            if (includeLexicons != null) {
                boolean found = false;
                for (Lexicon l : includeLexicons) {
                    if (lexiconName.equals(l.getName())) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    continue;
                }
            }

            logger.info("Processing lexicon: " + lexiconName);
            writeStartElement(lexicon);

            // Iterate over all possible sub-elements of this Lexicon and
            // write them to the XML
            Class<?>[] lexiconClassesToSave = { LexicalEntry.class, SubcategorizationFrame.class,
                    SubcategorizationFrameSet.class, SemanticPredicate.class, Synset.class,
                    SynSemCorrespondence.class,
                    //ConstraintSet.class
            };

            //  "Unfortunately, MySQL does not treat large offset values efficiently by default and will still read all the rows prior to an offset value. It is common to see a query with an offset above 100,000 take over 20 times longer than an offset of zero!"
            // http://www.numerati.com/2012/06/26/reading-large-result-sets-with-hibernate-and-mysql/
            for (Class<?> clazz : lexiconClassesToSave) {
                /*DetachedCriteria criteria = DetachedCriteria.forClass(clazz)
                      .add(Restrictions.sqlRestriction("lexiconId = '" + lexicon.getId() + "'"));
                CriteriaIterator<Object> iter = new CriteriaIterator<Object>(criteria, sessionFactory, bufferSize);
                while (iter.hasNext()) {
                   Object obj = iter.next();
                   writeElement(obj);
                   session.evict(obj);
                   commitCounter++;
                   if (commitCounter % 1000 == 0)
                      logger.info("progress: " + commitCounter  + " class instances written to file");
                }*/
                Session lookupSession = sessionFactory.openSession();
                Query query = lookupSession.createQuery("FROM " + clazz.getSimpleName() + " WHERE lexiconId = '"
                        + lexicon.getId() + "' ORDER BY id");
                query.setReadOnly(true);
                if (DBConfig.MYSQL.equals(dbConfig.getDBType())) {
                    query.setFetchSize(Integer.MIN_VALUE); // MIN_VALUE gives hint to JDBC driver to stream results
                } else {
                    query.setFetchSize(1000);
                }
                ScrollableResults results = query.scroll(ScrollMode.FORWARD_ONLY);
                while (results.next()) {
                    // For streamed query results, no further queries are allowed (incl. lazy proxy queries!)
                    // Detach the object from the lookup session and reload it using the "official" session.
                    Object[] rows = results.get();
                    Object row = rows[0];
                    lookupSession.evict(row);
                    lookupSession.evict(rows);
                    rows = null;
                    row = session.get(row.getClass(), ((IHasID) row).getId());
                    writeElement(row);
                    session.evict(row);
                    row = null;
                    commitCounter++;
                    if (commitCounter % 1000 == 0) {
                        logger.info("progress: " + commitCounter + " class instances written to file");
                    }
                    if (commitCounter % 10000 == 0) {
                        closeSession();
                        openSession();
                    }
                }
                results.close();
                lookupSession.close();
            }
            writeEndElement(lexicon);
        }
    }

    // Iterate over the sense axes and write them to the XML when not
    // only the lexicons should be converted
    if (includeAxes) {
        logger.info("Processing sense axes");
        DetachedCriteria criteria = DetachedCriteria.forClass(SenseAxis.class)
                .add(Restrictions.sqlRestriction("lexicalResourceId = '" + lexicalResource.getName() + "'"));
        CriteriaIterator<Object> iter = new CriteriaIterator<Object>(criteria, sessionFactory, bufferSize);
        while (iter.hasNext()) {
            Object obj = iter.next();
            writeElement(obj);
            session.evict(obj);
            commitCounter++;
            if (commitCounter % 1000 == 0) {
                logger.info("progress: " + commitCounter + " class instances written to file");
            }
        }

        logger.info("Processing predicateargument axes");
        DetachedCriteria criteria2 = DetachedCriteria.forClass(PredicateArgumentAxis.class)
                .add(Restrictions.sqlRestriction("lexicalResourceId = '" + lexicalResource.getName() + "'"));
        CriteriaIterator<Object> iter2 = new CriteriaIterator<Object>(criteria2, sessionFactory, bufferSize);
        while (iter2.hasNext()) {
            Object obj = iter2.next();
            writeElement(obj);
            session.evict(obj);
            commitCounter++;
            if (commitCounter % 1000 == 0) {
                logger.info("progress: " + commitCounter + " class instances written to file");
            }
        }

    }
    writeEndElement(lexicalResource);

    writeEndDocument();
}

From source file:easycare.load.util.db.loader.OrganisationDataLoader.java

@Transactional
public void buildAllLocations(ContextOfCurrentLoad context) {

    log.info("starting to load locations");
    Session session = (Session) em.getDelegate();
    ScrollableResults scrollableResults = session
            .createQuery("from Organisation where name != 'ResMed' order by accountNumber").scroll();

    int numberOfLoadedLocations = 0;
    int numberOfLoadedStdOrgLocations = 0;
    int numberOfLocationsToCreateInStandardOrg = context.getInitialNumOfLocationsInStdOrg();
    int numberOfUpdatedOrganisations = 0;
    int numberOfPatientsPlacedIntoLocations = 0;
    while (scrollableResults.next()) {

        Organisation organisation = (Organisation) scrollableResults.get(0);

        if (isStandardOrg(context, numberOfUpdatedOrganisations)) {
            log.info("creating locations for org " + (numberOfUpdatedOrganisations + 1));
            for (int i = 0; i < numberOfLocationsToCreateInStandardOrg
                    && numberOfLoadedStdOrgLocations < context.getTotalNumberOfLocationsInStandardOrgs(); i++) {
                int oldPatientsInOrg = numberOfPatientsPlacedIntoLocations;
                numberOfPatientsPlacedIntoLocations += (context.getNumberOfPatientsInStandardOrgLocation());
                log.info("creating location " + (i + 1) + "with patients from " + (oldPatientsInOrg + 1)
                        + " to " + (numberOfPatientsPlacedIntoLocations + 1));
                createAndAddLocationToOrganisation(context, i, organisation);
                numberOfLoadedStdOrgLocations++;
                numberOfLoadedLocations++;
            }
            session.update(organisation);
            em.flush();
            em.clear();
            numberOfLocationsToCreateInStandardOrg++;
        } else {
            int oldPatientsInOrg = numberOfPatientsPlacedIntoLocations;
            numberOfPatientsPlacedIntoLocations += context.getNumberOfPatientsPerSmallOrg();
            log.info(
                    "creating org " + (numberOfUpdatedOrganisations + 1) + " as a small org with patients from "
                            + (oldPatientsInOrg + 1) + " to " + (numberOfPatientsPlacedIntoLocations + 1));
            createAndAddLocationToOrganisation(context, ContextOfCurrentLoad.NUMBER_OF_LOCATIONS_IN_SMALL_ORG,
                    organisation);
            numberOfLoadedLocations++;
        }
        numberOfUpdatedOrganisations++;

        if (numberOfLoadedLocations % SMALL_CHUNK == 0) {
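            // flush pending writes and clear the persistence context periodically
            // so the session does not grow unboundedly during the bulk load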
            em.flush();
            em.clear();
            log.debug("finished small chunk of locations up to " + numberOfLoadedLocations + "the location");
        }
    }

    log.info("finished to load locations ");
}

From source file:easycare.load.util.db.loader.UserDataLoader.java

@Transactional
public void buildAllUsers(ContextOfCurrentLoad context) {

    log.info("starting to load users ");
    Session session = (Session) em.getDelegate();
    ScrollableResults scrollableResults = session
            .createQuery("from Organisation where name != 'ResMed' order by accountNumber").scroll();

    int numberOfOrganisationsWithAddedUsers = 0;
    while (scrollableResults.next()) {

        Organisation organisation = (Organisation) scrollableResults.get(0);
        String organisationNumber = StringUtils.remove(organisation.getAccountNumber(), "APT");

        loadOrgUsers(context, organisation, organisationNumber);

        if (++numberOfOrganisationsWithAddedUsers % SMALL_CHUNK == 0) {
            em.flush();
            em.clear();
            log.debug("finished small chunk of orgs with users up to " + numberOfOrganisationsWithAddedUsers
                    + "org");
        }
        log.info("Finished users for org " + organisation);
    }
    log.info("Finished to load users ");
}

From source file:edu.emory.library.tast.util.CSVUtils.java

License:Open Source License

private static DictionaryInfo[] getAllData(Session sess, TastDbQuery query, boolean useSQL,
        ZipOutputStream zipStream, boolean codes, String conditions) throws FileNotFoundException, IOException {

    SimpleDateFormat dateFormatter = new SimpleDateFormat(
            AppConfig.getConfiguration().getString(AppConfig.FORMAT_DATE_CVS));

    // insert the BOM (byte order mark)
    final byte[] bom = new byte[] { (byte) 0xEF, (byte) 0xBB, (byte) 0xBF };
    zipStream.write(bom);
    CSVWriter writer = new CSVWriter(new OutputStreamWriter(zipStream, encoding), ',');

    //CSVWriter writer = new CSVWriter(new OutputStreamWriter(zipStream), ',');
    ScrollableResults queryResponse = null;

    Map dictionaries = new HashMap();

    try {
        queryResponse = query.executeScrollableQuery(sess, useSQL);

        Attribute[] populatedAttrs = query.getPopulatedAttributes();

        if (conditions != "") {
            String[] con = new String[1];
            con[0] = conditions;
            writer.writeNext(con);
        }

        String[] row = new String[populatedAttrs.length - 1];
        for (int i = 1; i < populatedAttrs.length; i++) {
            row[i - 1] = populatedAttrs[i].getName();
        }

        writer.writeNext(row);

        int cnt = 0;

        while (queryResponse.next()) {

            cnt++;

            Object[] result = queryResponse.get();

            row = new String[populatedAttrs.length - 1];
            for (int j = 1; j < populatedAttrs.length; j++) {
                if (result[j] == null) {
                    row[j - 1] = "";
                } else {
                    if (!codes) {
                        if (result[j] instanceof Date)
                            row[j - 1] = dateFormatter.format(result[j]);
                        else
                            row[j - 1] = result[j].toString();
                        if (result[j] instanceof Dictionary) {
                            if (dictionaries.containsKey(populatedAttrs[j].toString())) {
                                DictionaryInfo info = (DictionaryInfo) dictionaries
                                        .get(populatedAttrs[j].toString());
                                if (!info.attributes.contains(populatedAttrs[j])) {
                                    info.attributes.add(populatedAttrs[j]);
                                }
                            } else {
                                DictionaryInfo info = new DictionaryInfo();
                                info.attributes.add(populatedAttrs[j]);
                                info.dictionary = result[j].getClass();
                                dictionaries.put(populatedAttrs[j].toString(), info);
                            }
                        }
                    } else {
                        if (result[j] instanceof Dictionary) {
                            row[j - 1] = ((Dictionary) result[j]).getId().toString();
                            if (dictionaries.containsKey(populatedAttrs[j].toString())) {
                                DictionaryInfo info = (DictionaryInfo) dictionaries
                                        .get(populatedAttrs[j].toString());
                                if (!info.attributes.contains(populatedAttrs[j])) {
                                    info.attributes.add(populatedAttrs[j]);
                                }
                            } else {
                                DictionaryInfo info = new DictionaryInfo();
                                info.attributes.add(populatedAttrs[j]);
                                info.dictionary = result[j].getClass();
                                dictionaries.put(populatedAttrs[j].toString(), info);
                            }
                        } else {
                            if (result[j] instanceof Date)
                                row[j - 1] = dateFormatter.format(result[j]);
                            else
                                row[j - 1] = result[j].toString();
                        }
                    }
                }
            }
            writer.writeNext(row);
        }

        writer.writeNext(new String[] { "The number of total records: " + cnt });

        writer.flush();
        return (DictionaryInfo[]) dictionaries.values().toArray(new DictionaryInfo[] {});

    } finally {
        if (queryResponse != null) {
            queryResponse.close();
        }
    }
}

From source file:edu.emory.library.tast.util.CSVUtils.java

License:Open Source License

private static void getAllData(Session sess, TastDbQuery query, boolean useSQL, ZipOutputStream zipStream,
        boolean codes) throws FileNotFoundException, IOException {
    SimpleDateFormat dateFormatter = new SimpleDateFormat(
            AppConfig.getConfiguration().getString(AppConfig.FORMAT_DATE_CVS));
    // insert the BOM (byte order mark)
    final byte[] bom = new byte[] { (byte) 0xEF, (byte) 0xBB, (byte) 0xBF };
    zipStream.write(bom);
    CSVWriter writer = new CSVWriter(new OutputStreamWriter(zipStream, encoding), ',');

    //TODO this snippet below is used for testing purposes only 
    /*File file = new File("c:\\tmp\\voyage.csv");
    FileOutputStream fout = new FileOutputStream(file);
    final byte[] bom = new byte[] { (byte)0xEF, (byte)0xBB, (byte)0xBF };              
     fout.write(bom);       
    CSVWriter writer = new CSVWriter(new OutputStreamWriter(fout, encoding), ',');*/

    ScrollableResults queryResponse = null;

    Map dictionaries = new HashMap();

    try {
        //query to retrieve users for the submissions 
        HashMap users = getUsersForSubmissions(sess);
        boolean usersExist = false;
        if (users != null && users.size() > 0) {
            usersExist = true;
        }
        //query for all the voyages
        queryResponse = query.executeScrollableQuery(sess, useSQL);

        Attribute[] populatedAttrs = query.getPopulatedAttributes();

        String[] row = new String[populatedAttrs.length + 1];
        int i;
        for (i = 0; i < populatedAttrs.length; i++) {
            row[i] = populatedAttrs[i].getName();
        }
        row[i] = "username";
        writer.writeNext(row);

        int cnt = 0;
        String userName = null;
        while (queryResponse.next()) {
            cnt++;
            Object[] result = queryResponse.get();

            row = new String[populatedAttrs.length + 1];
            int j;
            for (j = 0; j < populatedAttrs.length; j++) {
                if (populatedAttrs[j].getName().equals("iid")) {
                    userName = null;
                    if (usersExist) {
                        userName = (String) users.get(result[j]);
                    }
                }
                if (result[j] == null) {
                    row[j] = "";
                } else if (result[j] instanceof Date) {
                    row[j] = dateFormatter.format(result[j]);
                } else if (codes) {
                    if (result[j] instanceof Dictionary) {
                        row[j] = ((Dictionary) result[j]).getId().toString();
                    } else {
                        row[j] = result[j].toString();
                    }
                } else {//labels
                    row[j] = result[j].toString();
                }
            }
            if (userName != null) {
                row[j++] = userName;
            }
            writer.writeNext(row);
        }

        writer.flush();
    } catch (IOException io) {
        io.printStackTrace();
    } finally {
        if (queryResponse != null) {
            queryResponse.close();
        }
    }
}

From source file:edu.harvard.med.screensaver.db.ScreenResultsDAOImpl.java

License:Open Source License

public int createScreenedReagentCounts(final ScreenType screenType, Screen study,
        AnnotationType positiveAnnotationType, AnnotationType overallAnnotationType) {
    // Break this into two separate queries because of a Hibernate bug (http://opensource.atlassian.com/projects/hibernate/browse/HHH-1615):
    // when using the "group by" clause with a full object (as opposed to an attribute of the object/table),
    // Hibernate requires that every attribute of the object be specified in the "group by", not
    // just the object itself. The workaround is to query once to get the ids, then once again to
    // get the objects.

    log.info("1. get the reagent id's for the positive counts");
    ScrollableResults sr = runScrollQuery(new edu.harvard.med.screensaver.db.ScrollQuery() {
        public ScrollableResults execute(Session session) {
            HqlBuilder builder = new HqlBuilder();
            builder.select("r", "id").selectExpression("count(*)").from(AssayWell.class, "aw")
                    .from("aw", AssayWell.libraryWell, "w", JoinType.INNER)
                    .from("w", Well.latestReleasedReagent, "r", JoinType.INNER)
                    .from("w", Well.library, "l", JoinType.INNER)
                    .where("l", "screenType", Operator.EQUAL, screenType)
                    .where("w", "libraryWellType", Operator.EQUAL, LibraryWellType.EXPERIMENTAL);
            builder.where("aw", "positive", Operator.EQUAL, Boolean.TRUE);
            builder.groupBy("r", "id");
            log.debug("hql: " + builder.toHql());
            return builder.toQuery(session, true).setCacheMode(CacheMode.IGNORE)
                    .scroll(ScrollMode.FORWARD_ONLY);
        }
    });

    Map<Integer, Long> positivesMap = Maps.newHashMap();
    while (sr.next()) {
        Object[] row = sr.get();
        positivesMap.put((Integer) row[0], (Long) row[1]);
    }

    log.info("2. get the reagent id's for the overall counts");
    sr = runScrollQuery(new edu.harvard.med.screensaver.db.ScrollQuery() {
        public ScrollableResults execute(Session session) {
            HqlBuilder builder = new HqlBuilder();
            builder.select("r", "id").selectExpression("count(*)").from(AssayWell.class, "aw")
                    .from("aw", AssayWell.libraryWell, "w", JoinType.INNER)
                    .from("w", Well.library, "l", JoinType.INNER)
                    .from("w", Well.latestReleasedReagent, "r", JoinType.INNER)
                    .where("l", "screenType", Operator.EQUAL, screenType)
                    .where("w", "libraryWellType", Operator.EQUAL, LibraryWellType.EXPERIMENTAL)
                    .groupBy("r", "id");
            log.debug("hql: " + builder.toHql());
            return builder.toQuery(session, true).setCacheMode(CacheMode.IGNORE)
                    .scroll(ScrollMode.FORWARD_ONLY);
        }
    });

    Map<Integer, Long> overallMap = Maps.newHashMap();
    while (sr.next()) {
        Object[] row = sr.get();
        overallMap.put((Integer) row[0], (Long) row[1]);
    }

    log.info("3. get the Reagents");
    sr = runScrollQuery(new edu.harvard.med.screensaver.db.ScrollQuery() {
        public ScrollableResults execute(Session session) {
            HqlBuilder builder = new HqlBuilder();
            builder.select("r").distinctProjectionValues().from(AssayWell.class, "aw")
                    .from("aw", AssayWell.libraryWell, "w", JoinType.INNER)
                    .from("w", Well.library, "l", JoinType.INNER)
                    .from("w", Well.latestReleasedReagent, "r", JoinType.INNER)
                    .where("l", "screenType", Operator.EQUAL, screenType)
                    .where("w", "libraryWellType", Operator.EQUAL, LibraryWellType.EXPERIMENTAL);
            log.debug("hql: " + builder.toHql());
            return builder.toQuery(session, true).setCacheMode(CacheMode.IGNORE)
                    .scroll(ScrollMode.FORWARD_ONLY);
        }
    });

    log.info("4. build the Study: positives: " + positivesMap.size() + ", reagents: " + overallMap.size());
    int count = 0;
    while (sr.next()) {
        Reagent r = (Reagent) sr.get()[0];

        AnnotationValue av = new AnnotationValue(overallAnnotationType, r, null,
                (double) overallMap.get(r.getReagentId()).intValue());
        _dao.saveOrUpdateEntity(av);
        Long positiveCount = positivesMap.get(r.getReagentId());
        if (positiveCount != null) {
            av = new AnnotationValue(positiveAnnotationType, r, null, (double) positiveCount.intValue());
            _dao.saveOrUpdateEntity(av);
        }
        // Note: due to memory performance, we will build the study_reagent_link later
        if (count++ % ROWS_TO_CACHE == 0) {
            log.debug("flushing");
            _dao.flush();
            _dao.clear();
        }
        if (count % 10000 == 0) {
            log.info("" + count + " reagents processed");
        }
    }

    log.info("save the study");
    _dao.saveOrUpdateEntity(study);
    _dao.flush();
    log.info("populateStudyReagentLinkTable");
    int reagentCount = populateStudyReagentLinkTable(study.getScreenId());
    log.info("done: positives: " + positivesMap.size() + ", reagents: " + overallMap.size());
    return reagentCount;
}

From source file:edu.harvard.med.screensaver.io.screenresults.ScreenResultReporter.java

License:Open Source License

/**
 * Create a study of the &quot;Confirmed Positives&quot; for all the pool SilencingReagents in the DB.
 * (re: {@link DataType#CONFIRMED_POSITIVE_INDICATOR} ) <br>
 * <ul>
 * For RNAi
 * <li>Count of follow-up screens for well
 * <li>M+1 columns named "N duplexes confirming positive", where 0 <= N <= M, and M is the max number of duplexes per
 * pool in any library (currently 4). The value in each column is the number of follow-up screens that confirmed the
 * well as a positive with N duplexes
 * </ul>
 * see [#2610] Confirmed Positives study creator<br>
 * 
 * @return total count of confirmed positives considered in this study (informational)
 */
public int createSilencingReagentConfirmedPositiveSummary(Screen study) {
    log.info("Get all of the pool reagents...");
    ScrollableResults sr = _dao.runScrollQuery(new edu.harvard.med.screensaver.db.ScrollQuery() {
        public ScrollableResults execute(Session session) {
            HqlBuilder builder = new HqlBuilder();
            builder.select("pr").from(Library.class, "l").from("l", Library.wells, "w", JoinType.INNER)
                    .from("w", Well.latestReleasedReagent, "pr", JoinType.INNER)
                    .where("l", "pool", Operator.EQUAL, Boolean.TRUE);
            return builder.toQuery(session, true).setCacheMode(CacheMode.IGNORE)
                    .scroll(ScrollMode.FORWARD_ONLY);
        }
    });

    log.info("Create the annotation types for the study.");
    AnnotationType averageConfirmedPositivesPerScreen = study.createAnnotationType(
            DEFAULT_ANNOTATION_NAME_WEIGHTED_AVERAGE, DEFAULT_ANNOTATION_TITLE_WEIGHTED_AVERAGE, true);
    _dao.persistEntity(averageConfirmedPositivesPerScreen);
    AnnotationType numberOfScreensAT = study.createAnnotationType(DEFAULT_ANNOTATION_NAME_NUMBER_OF_SCREENS,
            DEFAULT_ANNOTATION_TITLE_NUMBER_OF_SCREENS, true);
    _dao.persistEntity(numberOfScreensAT);
    // Create the bin-count annotation types (for "screens confirming # duplexes...")
    Map<Integer, AnnotationType> binToAnnotationTypeMap = Maps.newHashMap();
    for (int i = 0; i <= 4; i++) // todo: make this a dynamic cardinality 
    {
        AnnotationType screenCounter = study.createAnnotationType(
                DEFAULT_ANNOTATION_NAME_COUNT_OF_SCREENS_N.format(i),
                DEFAULT_ANNOTATION_TITLE_COUNT_OF_SCREENS_N.format(i), true);
        binToAnnotationTypeMap.put(i, screenCounter);
        _dao.persistEntity(screenCounter);
    }
    _dao.flush();
    _dao.clear();

    log.info("scroll through the pool reagents...");
    int countOfDuplexReagentsConfirmed = 0;
    int count = 0;

    while (sr.next()) {
        SilencingReagent poolReagent = (SilencingReagent) sr.get(0);

        ConfirmationReport report = getDuplexReconfirmationReport(poolReagent);

        int[] binToScreenCount = report.getBinToScreenCount(poolReagent);
        int numberOfScreens = 0;
        for (int bin = 0; bin < binToScreenCount.length; bin++) {
            int screenCount = binToScreenCount[bin];

            AnnotationType at = binToAnnotationTypeMap.get(bin);
            // note: for memory performance, we're side-stepping the AnnotationType.createAnnotationValue() method
            AnnotationValue av = new AnnotationValue(at, poolReagent, null, (double) screenCount);
            _dao.saveOrUpdateEntity(av);

            numberOfScreens += screenCount;
            countOfDuplexReagentsConfirmed += screenCount * bin;
        }

        if (numberOfScreens > 0) {
            // note: for memory performance, we're side-stepping the AnnotationType.createAnnotationValue() method
            AnnotationValue av = new AnnotationValue(averageConfirmedPositivesPerScreen, poolReagent, null,
                    new Double("" + report.getWeightedAverage()));
            _dao.saveOrUpdateEntity(av);

        }
        // note: for memory performance, we're side-stepping the AnnotationType.createAnnotationValue() method
        AnnotationValue av = new AnnotationValue(numberOfScreensAT, poolReagent, null,
                (double) numberOfScreens);
        _dao.saveOrUpdateEntity(av);

        // for memory performance clear the session every CACHE_SIZE number of iterations
        if (count++ % AbstractDAO.ROWS_TO_CACHE == 0) {
            log.debug("clearing & flushing session");
            _dao.flush();
            _dao.clear();
        }
        if (count % 1000 == 0) {
            log.info("" + count + " reagents processed");
        }
    }
    log.info("" + count + " reagents processed");
    _dao.flush();
    _dao.clear();

    log.info("countOfDuplexReagentsConfirmed: " + countOfDuplexReagentsConfirmed);
    log.info("populateStudyReagentLinkTable");
    _screenDao.populateStudyReagentLinkTable(study.getScreenId());
    log.info("Study created: " + study.getTitle() + ", reagents: " + countOfDuplexReagentsConfirmed);
    return countOfDuplexReagentsConfirmed;
}

From source file:edu.harvard.med.screensaver.io.screenresults.ScreenResultReporter.java

License:Open Source License

/**
 * for [#2268] new column to display # overlapping screens
 */
@Transactional
public int createScreenedReagentCounts(final ScreenType screenType, Screen study,
        AnnotationType positiveAnnotationType, AnnotationType overallAnnotationType) {
    // Break this into two separate queries because of an apparent Hibernate bug:
    // when using the "group by" clause with a full object (as opposed to an attribute of the object/table),
    // Hibernate requires that every attribute of the object be specified in the "group by", not
    // just the object itself. The workaround is to query once to get the ids, then once again to
    // get the objects.
    //    study = _dao.mergeEntity(study);
    //    positiveAnnotationType = _dao.mergeEntity(positiveAnnotationType);
    //    overallAnnotationType = _dao.mergeEntity(overallAnnotationType);
    //    _dao.flush();

    log.info("1. get the reagent id's for the positive counts");
    ScrollableResults sr = _dao.runScrollQuery(new edu.harvard.med.screensaver.db.ScrollQuery() {
        public ScrollableResults execute(Session session) {
            HqlBuilder builder = new HqlBuilder();
            builder.select("r", "id").selectExpression("count(*)").from(AssayWell.class, "aw")
                    .from("aw", AssayWell.libraryWell, "w", JoinType.INNER)
                    .from("w", Well.latestReleasedReagent, "r", JoinType.INNER)
                    .from("w", Well.library, "l", JoinType.INNER)
                    .where("l", "screenType", Operator.EQUAL, screenType)
                    .where("w", "libraryWellType", Operator.EQUAL, LibraryWellType.EXPERIMENTAL);
            builder.where("aw", "positive", Operator.EQUAL, Boolean.TRUE);
            builder.groupBy("r", "id");
            log.debug("hql: " + builder.toHql());
            return builder.toQuery(session, true).setCacheMode(CacheMode.IGNORE)
                    .scroll(ScrollMode.FORWARD_ONLY);
        }
    });

    Map<Integer, Long> positivesMap = Maps.newHashMap();
    while (sr.next()) {
        Object[] row = sr.get();
        positivesMap.put((Integer) row[0], (Long) row[1]);
    }

    log.info("2. get the reagent id's for the overall counts");
    sr = _dao.runScrollQuery(new edu.harvard.med.screensaver.db.ScrollQuery() {
        public ScrollableResults execute(Session session) {
            HqlBuilder builder = new HqlBuilder();
            builder.select("r", "id").selectExpression("count(*)").from(AssayWell.class, "aw")
                    .from("aw", AssayWell.libraryWell, "w", JoinType.INNER)
                    .from("w", Well.library, "l", JoinType.INNER)
                    .from("w", Well.latestReleasedReagent, "r", JoinType.INNER)
                    .where("l", "screenType", Operator.EQUAL, screenType)
                    .where("w", "libraryWellType", Operator.EQUAL, LibraryWellType.EXPERIMENTAL)
                    .groupBy("r", "id");
            log.debug("hql: " + builder.toHql());
            return builder.toQuery(session, true).setCacheMode(CacheMode.IGNORE)
                    .scroll(ScrollMode.FORWARD_ONLY);
        }
    });

    log.info("begin assigning values to the study");
    int overallCount = 0;
    Map<Integer, Long> overallMap = Maps.newHashMap();
    while (sr.next()) {
        Object[] row = sr.get();
        Integer r_id = (Integer) row[0];
        Long count = (Long) row[1];
        Reagent r = _dao.findEntityById(Reagent.class, r_id, true);
        // note: for memory performance, we're side-stepping the AnnotationType.createAnnotationValue() method
        AnnotationValue av = new AnnotationValue(overallAnnotationType, r, null, (double) count);
        _dao.persistEntity(av);
        Long positiveCount = positivesMap.get(r_id);
        if (positiveCount != null) {
            // note: for memory performance, we're side-stepping the AnnotationType.createAnnotationValue() method
            av = new AnnotationValue(positiveAnnotationType, r, null, (double) positiveCount.intValue());
            _dao.persistEntity(av);
        }
        // Note: due to memory performance, we will build the study_reagent_link later
        if (overallCount % AbstractDAO.ROWS_TO_CACHE == 0) {
            log.debug("flushing");
            _dao.flush();
            _dao.clear();
        }
        if (++overallCount % 10000 == 0) {
            log.info("" + overallCount + " reagents processed");
        }
    }

    log.info("save the study");
    // unnecessary since study is already persisted, and the reagents will be linked by the populateStudyReagentLinkTable - sde4
    // _dao.mergeEntity(study);
    _dao.flush();
    log.info("populateStudyReagentLinkTable");
    int reagentCount = _screenDao.populateStudyReagentLinkTable(study.getScreenId());
    log.info("done: positives: " + positivesMap.size() + ", reagents: " + overallCount);
    return reagentCount;
}