Example usage for org.hibernate FlushMode MANUAL

List of usage examples for org.hibernate FlushMode MANUAL

Introduction

This page collects usage examples for org.hibernate FlushMode MANUAL.

Prototype

FlushMode MANUAL

Document

The Session is only ever flushed when Session#flush is explicitly called by the application.
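
For orientation, here is a minimal sketch of the flush behavior this constant selects, assuming a plain Hibernate SessionFactory named sessionFactory and a hypothetical User entity with username and email properties. With MANUAL flush mode, queries do not trigger an automatic flush, and pending changes reach the database only on an explicit Session#flush call.

Session session = sessionFactory.openSession();
try {
    session.setFlushMode(FlushMode.MANUAL); // disable automatic flushing
    Transaction tx = session.beginTransaction();

    // A query against this session will not flush pending changes first
    User user = (User) session.createQuery("from User where username = :name")
            .setParameter("name", "jdoe").uniqueResult();

    user.setEmail("jdoe@example.com"); // change stays in memory for now

    session.flush(); // changes are written only on this explicit call
    tx.commit();
} finally {
    session.close();
}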

Usage

From source file: org.osaf.cosmo.dao.hibernate.UserDaoImpl.java

License: Apache License

private User findUserByUsernameIgnoreCase(String username) {
    Session session = getSession();
    Query hibQuery = session.getNamedQuery("user.byUsername.ignorecase").setParameter("username", username);
    hibQuery.setCacheable(true);
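    // MANUAL flush mode: this read-only lookup will not flush pending session changes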
    hibQuery.setFlushMode(FlushMode.MANUAL);
    List users = hibQuery.list();
    if (users.size() > 0)
        return (User) users.get(0);
    else
        return null;
}

From source file: org.osaf.cosmo.dao.hibernate.UserDaoImpl.java

License: Apache License

private User findUserByUsernameOrEmailIgnoreCaseAndId(Long userId, String username, String email) {
    Session session = getSession();
    Query hibQuery = session.getNamedQuery("user.byUsernameOrEmail.ignorecase.ingoreId")
            .setParameter("username", username).setParameter("email", email).setParameter("userid", userId);
    hibQuery.setCacheable(true);
    hibQuery.setFlushMode(FlushMode.MANUAL);
    List users = hibQuery.list();
    if (users.size() > 0)
        return (User) users.get(0);
    else
        return null;
}

From source file: org.osaf.cosmo.dao.hibernate.UserDaoImpl.java

License: Apache License

private User findUserByEmail(String email) {
    Session session = getSession();
    Query hibQuery = session.getNamedQuery("user.byEmail").setParameter("email", email);
    hibQuery.setCacheable(true);
    hibQuery.setFlushMode(FlushMode.MANUAL);
    List users = hibQuery.list();
    if (users.size() > 0)
        return (User) users.get(0);
    else
        return null;
}

From source file: org.osaf.cosmo.dao.hibernate.UserDaoImpl.java

License: Apache License

private User findUserByEmailIgnoreCase(String email) {
    Session session = getSession();
    Query hibQuery = session.getNamedQuery("user.byEmail.ignorecase").setParameter("email", email);
    hibQuery.setCacheable(true);
    hibQuery.setFlushMode(FlushMode.MANUAL);
    List users = hibQuery.list();
    if (users.size() > 0)
        return (User) users.get(0);
    else
        return null;
}

From source file: org.osaf.cosmo.dao.hibernate.UserDaoImpl.java

License: Apache License

private User findUserByUid(String uid) {
    Session session = getSession();
    Query hibQuery = session.getNamedQuery("user.byUid").setParameter("uid", uid);
    hibQuery.setCacheable(true);
    hibQuery.setFlushMode(FlushMode.MANUAL);
    return (User) hibQuery.uniqueResult();
}

From source file: org.osaf.cosmo.hibernate.ThrowAwayHibernateSessionOnErrorInterceptor.java

License: Apache License

private void handleException() {

    // If session is bound to transaction, close it and create/bind
    // new session to prevent stale data when retrying transaction
    if (TransactionSynchronizationManager.hasResource(sessionFactory)) {

        if (log.isDebugEnabled())
            log.debug("throwing away bad session and binding new one");

        // Get current session and close
        SessionHolder sessionHolder = (SessionHolder) TransactionSynchronizationManager
                .unbindResource(sessionFactory);

        SessionFactoryUtils.closeSession(sessionHolder.getSession());

        // Open new session and bind (this session should be closed and
        // unbound elsewhere, for example OpenSessionInViewFilter)
        Session session = SessionFactoryUtils.getSession(sessionFactory, true);
        session.setFlushMode(FlushMode.MANUAL);
        TransactionSynchronizationManager.bindResource(sessionFactory, new SessionHolder(session));
    }
}

From source file: org.photovault.swingui.framework.PersistenceController.java

License: Open Source License

/**
 * Subclass is completely dependent on the given view and is a subcontroller. Also joins the given persistence context.
 *
 * @param view
 * @param parentController
 * @param persistenceContext
 */
public PersistenceController(Container view, AbstractController parentController, Session persistenceContext) {
    super(view, parentController);

    // Open a new persistence context for this controller, if none should be joined
    if (persistenceContext == null) {
        log.debug("Creating new persistence context for controller");
        this.persistenceContext = HibernateUtil.getSessionFactory().openSession();
        this.persistenceContext.setFlushMode(FlushMode.MANUAL);
        this.ownsPersistenceContext = true;
    } else {
        log.debug("Joining existing persistence context");
        this.persistenceContext = persistenceContext;
    }
    daoFactory = (HibernateDAOFactory) DAOFactory.instance(HibernateDAOFactory.class);
    daoFactory.setSession(this.persistenceContext);
}

From source file: org.projectforge.database.DatabaseDao.java

License: Open Source License

private long reindexObjects(final Class<?> clazz, final ReindexSettings settings) {
    final Session session = getSession();
    Criteria criteria = createCriteria(session, clazz, settings, true);
    final Long number = (Long) criteria.uniqueResult(); // Get number of objects to re-index (select count(*) from).
    final boolean scrollMode = number > MIN_REINDEX_ENTRIES_4_USE_SCROLL_MODE ? true : false;
    log.info("Starting re-indexing of " + number + " entries (total number) of type " + clazz.getName()
            + " with scrollMode=" + scrollMode + "...");
    final int batchSize = 1000;// NumberUtils.createInteger(System.getProperty("hibernate.search.worker.batch_size")
    final FullTextSession fullTextSession = Search.getFullTextSession(session);
    fullTextSession.setFlushMode(FlushMode.MANUAL);
    fullTextSession.setCacheMode(CacheMode.IGNORE);
    long index = 0;
    if (scrollMode == true) {
        // Scroll-able results will avoid loading too many objects in memory
        criteria = createCriteria(fullTextSession, clazz, settings, false);
        final ScrollableResults results = criteria.scroll(ScrollMode.FORWARD_ONLY);
        while (results.next() == true) {
            final Object obj = results.get(0);
            if (obj instanceof ExtendedBaseDO<?>) {
                ((ExtendedBaseDO<?>) obj).recalculate();
            }
            fullTextSession.index(obj); // index each element
            if (index++ % batchSize == 0)
                session.flush(); // clear every batchSize since the queue is processed
        }
    } else {
        criteria = createCriteria(session, clazz, settings, false);
        final List<?> list = criteria.list();
        for (final Object obj : list) {
            if (obj instanceof ExtendedBaseDO<?>) {
                ((ExtendedBaseDO<?>) obj).recalculate();
            }
            fullTextSession.index(obj);
            if (index++ % batchSize == 0)
                session.flush(); // clear every batchSize since the queue is processed
        }
    }
    final SearchFactory searchFactory = fullTextSession.getSearchFactory();
    searchFactory.optimize(clazz);
    log.info("Re-indexing of " + index + " objects of type " + clazz.getName() + " done.");
    return index;
}

From source file: org.projectforge.framework.persistence.database.DatabaseDao.java

License: Open Source License

private long reindexObjects(final Class<?> clazz, final ReindexSettings settings) {
    final Session session = sessionFactory.getCurrentSession();
    Criteria criteria = createCriteria(session, clazz, settings, true);
    final Long number = (Long) criteria.uniqueResult(); // Get number of objects to re-index (select count(*) from).
    final boolean scrollMode = number > MIN_REINDEX_ENTRIES_4_USE_SCROLL_MODE ? true : false;
    log.info("Starting re-indexing of " + number + " entries (total number) of type " + clazz.getName()
            + " with scrollMode=" + scrollMode + "...");
    final int batchSize = 1000;// NumberUtils.createInteger(System.getProperty("hibernate.search.worker.batch_size")
    final FullTextSession fullTextSession = Search.getFullTextSession(session);
    HibernateCompatUtils.setFlushMode(fullTextSession, FlushMode.MANUAL);
    HibernateCompatUtils.setCacheMode(fullTextSession, CacheMode.IGNORE);
    long index = 0;
    if (scrollMode == true) {
        // Scroll-able results will avoid loading too many objects in memory
        criteria = createCriteria(fullTextSession, clazz, settings, false);
        final ScrollableResults results = criteria.scroll(ScrollMode.FORWARD_ONLY);
        while (results.next() == true) {
            final Object obj = results.get(0);
            if (obj instanceof ExtendedBaseDO<?>) {
                ((ExtendedBaseDO<?>) obj).recalculate();
            }
            HibernateCompatUtils.index(fullTextSession, obj);
            if (index++ % batchSize == 0) {
                session.flush(); // clear every batchSize since the queue is processed
            }
        }
    } else {
        criteria = createCriteria(session, clazz, settings, false);
        final List<?> list = criteria.list();
        for (final Object obj : list) {
            if (obj instanceof ExtendedBaseDO<?>) {
                ((ExtendedBaseDO<?>) obj).recalculate();
            }
            HibernateCompatUtils.index(fullTextSession, obj);
            if (index++ % batchSize == 0) {
                session.flush(); // clear every batchSize since the queue is processed
            }
        }
    }
    final SearchFactory searchFactory = fullTextSession.getSearchFactory();
    searchFactory.optimize(clazz);
    log.info("Re-indexing of " + index + " objects of type " + clazz.getName() + " done.");
    return index;
}

From source file: org.sakaiproject.tool.assessment.facade.ItemHashUtil.java

License: Educational Community License

/**
 * Bit of a hack to allow reuse between {@link ItemFacadeQueries} and {@link PublishedItemFacadeQueries}.
 * Arguments are rather arbitrary extension points to support what we happen to <em>know</em> are the differences
 * between item and published item processing, as well as the common utilities/service dependencies.
 *
 * @param batchSize
 * @param hqlQueries
 * @param concreteType
 * @param hashAndAssignCallback
 * @param hibernateTemplate
 * @return
 */
BackfillItemHashResult backfillItemHashes(int batchSize, Map<String, String> hqlQueries,
        Class<? extends ItemDataIfc> concreteType, Function<ItemDataIfc, ItemDataIfc> hashAndAssignCallback,
        HibernateTemplate hibernateTemplate) {

    final long startTime = System.currentTimeMillis();
    log.debug("Hash backfill starting for items of type [" + concreteType.getSimpleName() + "]");

    if (batchSize <= 0) {
        batchSize = 100;
    }
    final int flushSize = batchSize;

    final AtomicInteger totalItems = new AtomicInteger(0);
    final AtomicInteger totalItemsNeedingBackfill = new AtomicInteger(0);
    final AtomicInteger batchNumber = new AtomicInteger(0);
    final AtomicInteger recordsRead = new AtomicInteger(0);
    final AtomicInteger recordsUpdated = new AtomicInteger(0);
    final Map<Long, Throwable> hashingErrors = new TreeMap<>();
    final Map<Integer, Throwable> otherErrors = new TreeMap<>();
    final List<Long> batchElapsedTimes = new ArrayList<>();
    // always needed as *printable* average per-batch timing value, so just store as string. and cache at this
    // scope b/c we sometimes need to print a single calculation multiple times, e.g. in last batch and
    // at method exit
    final AtomicReference<String> currentAvgBatchElapsedTime = new AtomicReference<>("0.00");
    final AtomicBoolean areMoreItems = new AtomicBoolean(true);

    // Get the item totals up front since a) we know any questions created while the job is running will be
    // assigned hashes and thus won't need to be handled by the job and b) makes bookkeeping within the job much
    // easier
    hibernateTemplate.execute(session -> {
        session.setDefaultReadOnly(true);
        totalItems.set(countItems(hqlQueries, session));
        totalItemsNeedingBackfill.set(countItemsNeedingHashBackfill(hqlQueries, session));
        log.debug("Hash backfill required for [" + totalItemsNeedingBackfill + "] of [" + totalItems
                + "] items of type [" + concreteType.getSimpleName() + "]");
        return null;
    });

    while (areMoreItems.get()) {
        long batchStartTime = System.currentTimeMillis();
        batchNumber.getAndIncrement();
        final AtomicInteger itemsHashedInBatch = new AtomicInteger(0);
        final AtomicInteger itemsReadInBatch = new AtomicInteger(0);
        final AtomicReference<Throwable> failure = new AtomicReference<>(null);

        // Idea here is a) avoid very long running transactions and b) avoid reading all items into memory
        // and c) avoid weirdness, e.g. duplicate results, when paginating complex hibernate objects. So
        // there's a per-batch transaction, and each batch re-runs the same two item lookup queries, one to
        // get the list of IDs for the next page of items, and one to resolve those IDs to items
        try {
            new TransactionTemplate(transactionManager, requireNewTransaction()).execute(status -> {
                hibernateTemplate.execute(session -> {
                    List<ItemDataIfc> itemsInBatch = null;
                    try { // resource cleanup block
                        session.setFlushMode(FlushMode.MANUAL);
                        try { // initial read block (failures here are fatal)

                            // set up the actual result set for this batch of items. use error count to skip over failed items
                            final List<Long> itemIds = itemIdsNeedingHashBackfill(hqlQueries, flushSize,
                                    hashingErrors.size(), session);
                            itemsInBatch = itemsById(itemIds, hqlQueries, session);

                        } catch (RuntimeException e) {
                            // Panic on failure to read counts and/or the actual items in the batch.
                            // Otherwise would potentially loop indefinitely since this design has no way to
                            // skip this page of results.
                            log.error("Failed to read batch of hashable items. Giving up at record ["
                                    + recordsRead + "] of [" + totalItemsNeedingBackfill + "] Type: ["
                                    + concreteType.getSimpleName() + "]", e);
                            areMoreItems.set(false); // force overall loop to exit
                            throw e; // force txn to give up
                        }

                        for (ItemDataIfc item : itemsInBatch) {
                            recordsRead.getAndIncrement();
                            itemsReadInBatch.getAndIncrement();

                            // Assign the item's hash/es
                            try {
                                log.debug("Backfilling hash for item [" + recordsRead + "] of ["
                                        + totalItemsNeedingBackfill + "] Type: [" + concreteType.getSimpleName()
                                        + "] ID: [" + item.getItemId() + "]");
                                hashAndAssignCallback.apply(item);
                                itemsHashedInBatch.getAndIncrement();
                            } catch (Throwable t) {
                                // Failures considered ignorable here... probably some unexpected item state
                                // that prevented hash calculation.
                                //
                                // Re the log statement... yes, the caller probably logs exceptions, but likely
                                // without stack traces, and we'd like to advertise failures as quickly as possible,
                                // so we go ahead and emit an error log here.
                                log.error("Item hash calculation failed for item [" + recordsRead + "] of ["
                                        + totalItemsNeedingBackfill + "] Type: [" + concreteType.getSimpleName()
                                        + "] ID: [" + (item == null ? "?" : item.getItemId()) + "]", t);
                                hashingErrors.put(item.getItemId(), t);
                            }

                        }
                        if (itemsHashedInBatch.get() > 0) {
                            session.flush();
                            recordsUpdated.getAndAdd(itemsHashedInBatch.get());
                        }
                        areMoreItems.set(itemsInBatch.size() >= flushSize);

                    } finally {
                        quietlyClear(session); // potentially very large, so clear aggressively
                    }
                    return null;
                }); // end session
                return null;
            }); // end transaction
        } catch (Throwable t) {
            // We're still in the loop over all batches, but something caused the current batch (and its
            // transaction) to exit abnormally. Logging of both success and failure cases is quite detailed,
            // and needs the same timing calcs, so is consolidated into the  'finally' block below.
            failure.set(t);
            otherErrors.put(batchNumber.get(), t);
        } finally {
            // Detailed batch-level reporting
            final long batchElapsed = (System.currentTimeMillis() - batchStartTime);
            batchElapsedTimes.add(batchElapsed);
            currentAvgBatchElapsedTime.set(new DecimalFormat("#.00")
                    .format(batchElapsedTimes.stream().collect(Collectors.averagingLong(l -> l))));
            if (failure.get() == null) {
                log.debug("Item hash backfill batch flushed to database. Type: [" + concreteType.getSimpleName()
                        + "] Batch number: [" + batchNumber + "] Items attempted in batch: [" + itemsReadInBatch
                        + "] Items succeeded in batch: [" + itemsHashedInBatch + "] Total items attempted: ["
                        + recordsRead + "] Total items succeeded: [" + recordsUpdated
                        + "] Total attemptable items: [" + totalItemsNeedingBackfill + "] Elapsed batch time: ["
                        + batchElapsed + "ms] Avg time/batch: [" + currentAvgBatchElapsedTime + "ms]");
            } else {
                // yes, caller probably logs exceptions later, but probably without stack traces, and we'd
                // like to advertise failures as quickly as possible, so we go ahead and emit an error log
                // here.
                log.error("Item hash backfill failed. Type: [" + concreteType.getSimpleName()
                        + "] Batch number: [" + batchNumber + "] Items attempted in batch: [" + itemsReadInBatch
                        + "] Items flushable (but failed) in batch: [" + itemsHashedInBatch
                        + "] Total items attempted: [" + recordsRead + "] Total items succeeded: ["
                        + recordsUpdated + "] Total attemptable items: [" + totalItemsNeedingBackfill
                        + "] Elapsed batch time: [" + batchElapsed + "ms] Avg time/batch: ["
                        + currentAvgBatchElapsedTime + "ms]", failure.get());
            }
        }
    } // end loop over all batches

    final long elapsedTime = System.currentTimeMillis() - startTime;
    log.debug("Hash backfill completed for items of type [" + concreteType.getSimpleName()
            + "]. Total items attempted: [" + recordsRead + "] Total items succeeded: [" + recordsUpdated
            + "] Target attemptable items: [" + totalItemsNeedingBackfill + "] Total elapsed time: ["
            + elapsedTime + "ms] Total batches: [" + batchNumber + "] Avg time/batch: ["
            + currentAvgBatchElapsedTime + "ms]");

    return new BackfillItemHashResult(elapsedTime, totalItems.get(), totalItemsNeedingBackfill.get(),
            recordsRead.get(), recordsUpdated.get(), flushSize, hashingErrors, otherErrors);
}