Example usage for org.hibernate ScrollMode FORWARD_ONLY

List of usage examples for org.hibernate ScrollMode FORWARD_ONLY

Introduction

In this page you can find the example usage for org.hibernate ScrollMode FORWARD_ONLY.

Prototype

ScrollMode FORWARD_ONLY

To view the source code for org.hibernate ScrollMode FORWARD_ONLY, use the source link below.

Click Source Link

Document

Requests a scrollable result that is only scrollable forwards.

Usage

From source file:nl.strohalm.cyclos.utils.lucene.IndexOperationRunner.java

License:Open Source License

/**
 * Recreates the Lucene index for the given entity type.
 * <p>
 * When {@code force} is {@code false}, the rebuild is executed only if the
 * index is corrupt or missing; when {@code true} it always runs. Optionally
 * raises start / end system alerts around the rebuild.
 *
 * @param entityType  the indexed entity class whose index is rebuilt
 * @param force       when true, rebuild unconditionally
 * @param createAlert when true, create INDEX_REBUILD_START / _END alerts
 */
private void rebuild(final Class<? extends Indexable> entityType, final boolean force,
        final boolean createAlert) {
    boolean execute = true;
    // When not forced, run only if the index actually needs rebuilding.
    // Fix: the original condition was inverted (it executed when the index
    // was neither corrupt nor missing), contradicting the documented contract.
    if (!force) {
        final IndexStatus status = indexHandler.getIndexStatus(entityType);
        execute = status == IndexStatus.CORRUPT || status == IndexStatus.MISSING;
    }
    if (!execute) {
        return;
    }

    if (createAlert) {
        // Signal the beginning of the rebuild
        createAlert(SystemAlert.Alerts.INDEX_REBUILD_START, entityType);
    }

    // Close and discard any cached writer for this entity type, since its
    // underlying directory is about to be wiped
    IndexWriter indexWriter = cachedWriters.get(entityType);
    if (indexWriter != null) {
        try {
            indexWriter.close();
        } catch (final Exception e) {
            // Best-effort close: the directory is deleted next anyway
        }
        cachedWriters.remove(entityType);
    }
    // Remove all files and recreate the directory
    final File dir = indexHandler.getIndexDir(entityType);
    try {
        FileUtils.deleteDirectory(dir);
    } catch (final IOException e) {
        // Best-effort delete: a partial delete is tolerated
    }
    dir.mkdirs();

    final DocumentMapper documentMapper = indexHandler.getDocumentMapper(entityType);
    final IndexWriter writer = getWriter(entityType);

    // Re-add all entities to the fresh index inside a read-only transaction
    boolean success = readonlyTransactionTemplate.execute(new TransactionCallback<Boolean>() {
        public Boolean doInTransaction(final TransactionStatus status) {
            Session session = getSession();
            ScrollableResults scroll = session.createQuery(resolveHql(entityType))
                    .scroll(ScrollMode.FORWARD_ONLY);

            try {
                int index = 0;
                while (scroll.next()) {
                    Indexable entity = (Indexable) scroll.get(0);
                    Document document = documentMapper.map(entity);
                    try {
                        writer.addDocument(document);
                    } catch (CorruptIndexException e) {
                        handleIndexCorrupted(entityType);
                        return false;
                    } catch (IOException e) {
                        LOG.error("Error while adding document to index after rebuilding "
                                + ClassHelper.getClassName(entityType), e);
                        return false;
                    }
                    // Every batch, clear the session and commit the writer to
                    // keep memory usage bounded
                    if (++index % 30 == 0) {
                        session.clear();
                        commit(entityType, writer);
                    }
                }
                return true;
            } finally {
                scroll.close();
            }
        }
    });

    // Finish the writer operation
    try {
        if (success) {
            commit(entityType, writer);
        } else {
            rollback(entityType, writer);
        }
    } finally {
        if (createAlert) {
            // Signal the end of the rebuild even when it failed
            createAlert(SystemAlert.Alerts.INDEX_REBUILD_END, entityType);
        }
    }
}

From source file:nl.strohalm.cyclos.utils.lucene.IndexOperationRunner.java

License:Open Source License

/**
 * Re-indexes all non-deleted advertisements owned by the given member:
 * first removes the member's existing ad documents, then adds fresh ones.
 *
 * @param userId   id of the member whose ads are re-indexed
 * @param analyzer analyzer used when adding the new documents
 * @param session  open Hibernate session used to load the ads
 * @return true when the index was updated and committed, false when any
 *         index error forced a rollback
 */
private boolean rebuildMemberAds(final Long userId, final Analyzer analyzer, final Session session) {
    final Class<? extends Indexable> entityType = Ad.class;
    final IndexWriter writer = getWriter(entityType);
    boolean success = true;

    DocumentMapper documentMapper = indexHandler.getDocumentMapper(entityType);
    try {
        // Drop all previously indexed ads of this owner
        writer.deleteDocuments(new Term("owner", userId.toString()));
    } catch (CorruptIndexException e) {
        handleIndexCorrupted(entityType);
        success = false;
    } catch (IOException e) {
        LOG.error("Error while reindexing a member's advertisements", e);
        success = false;
    }

    // Fix: only scan and re-add when the delete phase worked. The original
    // version unconditionally set success = true after the loop, which
    // silently discarded every error (from the delete phase and from the
    // breaks below) and committed a possibly inconsistent index.
    if (success) {
        // Fix: bind the user id as a named parameter instead of
        // concatenating it into the HQL string
        ScrollableResults scroll = session
                .createQuery("from Ad a where a.deleteDate is null and a.owner.id = :userId")
                .setParameter("userId", userId).scroll(ScrollMode.FORWARD_ONLY);

        try {
            int index = 0;
            while (scroll.next()) {
                Indexable entity = (Indexable) scroll.get(0);
                Document document = documentMapper.map(entity);
                try {
                    writer.addDocument(document, analyzer);
                } catch (CorruptIndexException e) {
                    handleIndexCorrupted(entityType);
                    success = false;
                    break;
                } catch (IOException e) {
                    LOG.error("Error while adding advertisements to index", e);
                    success = false;
                    break;
                }
                // Every batch, clear the session to keep memory bounded
                if (++index % 30 == 0) {
                    session.clear();
                }
            }
        } finally {
            scroll.close();
        }
    }

    // Finish the writer operation
    if (success) {
        commit(entityType, writer);
        return true;
    } else {
        rollback(entityType, writer);
        return false;
    }
}

From source file:nl.strohalm.cyclos.utils.lucene.IndexOperationRunner.java

License:Open Source License

/**
 * Re-indexes all member records attached to the given user element:
 * first removes the existing record documents, then adds fresh ones.
 *
 * @param userId   id of the element whose records are re-indexed
 * @param analyzer analyzer used when adding the new documents
 * @param session  open Hibernate session used to load the records
 * @return true when the index was updated and committed, false when any
 *         index error forced a rollback
 */
private boolean rebuildMemberRecords(final Long userId, final Analyzer analyzer, final Session session) {
    final Class<? extends Indexable> entityType = MemberRecord.class;
    final IndexWriter writer = getWriter(entityType);
    boolean success = true;

    DocumentMapper documentMapper = indexHandler.getDocumentMapper(entityType);
    try {
        // Drop all previously indexed records of this element
        writer.deleteDocuments(new Term("element", userId.toString()));
    } catch (CorruptIndexException e) {
        handleIndexCorrupted(entityType);
        success = false;
    } catch (IOException e) {
        LOG.error("Error while reindexing an user's records", e);
        success = false;
    }

    // Fix: only scan and re-add when the delete phase worked. The original
    // version unconditionally set success = true after the loop, which
    // silently discarded every error (from the delete phase and from the
    // breaks below) and committed a possibly inconsistent index.
    if (success) {
        // Fix: bind the user id as a named parameter instead of
        // concatenating it into the HQL string
        ScrollableResults scroll = session
                .createQuery("from MemberRecord mr where mr.element.id = :userId")
                .setParameter("userId", userId).scroll(ScrollMode.FORWARD_ONLY);

        try {
            int index = 0;
            while (scroll.next()) {
                Indexable entity = (Indexable) scroll.get(0);
                Document document = documentMapper.map(entity);
                try {
                    writer.addDocument(document, analyzer);
                } catch (CorruptIndexException e) {
                    handleIndexCorrupted(entityType);
                    success = false;
                    break;
                } catch (IOException e) {
                    LOG.error("Error while adding member records to index", e);
                    success = false;
                    break;
                }
                // Every batch, clear the session to keep memory bounded
                if (++index % 30 == 0) {
                    session.clear();
                }
            }
        } finally {
            scroll.close();
        }
    }

    // Finish the writer operation
    if (success) {
        commit(entityType, writer);
        return true;
    } else {
        rollback(entityType, writer);
        return false;
    }
}

From source file:nl.strohalm.cyclos.utils.ScrollableResultsIterator.java

License:Open Source License

public ScrollableResultsIterator(final Query query, final Transformer<Object[], T> transformer) {
    this.results = query.scroll(ScrollMode.FORWARD_ONLY);
    if (query instanceof SQLQuery) {
        // The getReturnTypes doesn't work for SQLQueries... Assume an array
        array = true;//from   ww  w.j a  v  a 2  s.  c  om
    } else {
        // this (extra) check to see if the query starts with "select new" is just to support the
        // following case: SELECT new A(e.prop1, e.prop2) FROM Entity e ...
        // in that case we musn't return an array in the next() method.
        array = query.getReturnTypes().length > 1
                && !query.getQueryString().trim().toLowerCase().startsWith("select new");
    }
    this.transformer = transformer;
    getNextObject();

    DataIteratorHelper.registerOpen(this, true);
}

From source file:ome.services.db.SearchTest.java

License:Open Source License

/**
 * Manually re-indexes all Experimenter entities through a forward-only
 * scroll, then verifies the "root" experimenter can be found by omeName.
 */
@Test
public void testReindexingExperimenter() throws Exception {
    HibernateTest ht = new HibernateTest();
    ht.setupSession();

    FullTextSession fullTextSession = Search.getFullTextSession(ht.s);
    fullTextSession.setFlushMode(FlushMode.MANUAL);
    fullTextSession.setCacheMode(CacheMode.IGNORE);
    Transaction transaction = fullTextSession.beginTransaction();
    // Scrollable results avoid loading too many objects in memory
    ScrollableResults results = fullTextSession.createCriteria(Experimenter.class)
            .scroll(ScrollMode.FORWARD_ONLY);
    try {
        int index = 0;
        int batchSize = 10;
        while (results.next()) {
            index++;
            fullTextSession.index(results.get(0)); // index each element
            if (index % batchSize == 0) {
                // Fix: push the queued index work to the backend before
                // clearing, otherwise clear() discards the pending changes
                fullTextSession.flushToIndexes();
                ht.s.clear();
            }
        }
    } finally {
        // Fix: release the scrollable cursor
        results.close();
    }
    // Fix: the transaction was begun but never committed, so the indexing
    // work was never applied
    transaction.commit();
    ht.closeSession();

    ht.setupSession();
    List<Experimenter> list = query(ht, "root", Experimenter.class, "omeName");
    assertTrue(list.toString(), list.size() == 1);
    ht.closeSession();
}

From source file:onl.netfishers.netshot.work.tasks.CheckGroupComplianceTask.java

License:Open Source License

/**
 * Deletes the group's previous compliance check results, then re-checks
 * every device of the group against each policy applied to it.
 */
@Override
public void run() {
    logger.debug("Starting check compliance task for group {}.", deviceGroup.getId());
    this.logIt(String.format("Check compliance task for group %s.", deviceGroup.getName()), 5);

    Session session = Database.getSession();
    try {
        @SuppressWarnings("unchecked")
        List<Policy> policies = session.createCriteria(Policy.class).list();

        session.beginTransaction();
        // Purge all previous check results for devices of this group
        session.createQuery(
                "delete from CheckResult c where c.key.device.id in (select d.id as id from DeviceGroup g1 join g1.cachedDevices d where g1.id = :id)")
                .setLong("id", deviceGroup.getId()).executeUpdate();
        for (Policy policy : policies) {
            ScrollableResults devices = session.createQuery(
                    "from Device d join fetch d.lastConfig where d.id in (select d.id as id from DeviceGroup g1 join g1.cachedDevices d join d.ownerGroups g2 join g2.appliedPolicies p where g1.id = :id and p.id = :pid)")
                    .setLong("id", deviceGroup.getId()).setLong("pid", policy.getId())
                    .setCacheMode(CacheMode.IGNORE).scroll(ScrollMode.FORWARD_ONLY);
            // Fix: close the scrollable cursor once this policy is processed
            try {
                while (devices.next()) {
                    Device device = (Device) devices.get(0);
                    policy.check(device, session);
                    // Flush and evict per device to keep the session small
                    session.flush();
                    session.evict(device);
                }
            } finally {
                devices.close();
            }
        }
        session.getTransaction().commit();
        this.status = Status.SUCCESS;
    } catch (Exception e) {
        try {
            session.getTransaction().rollback();
        } catch (Exception e1) {
            // Rollback failure is ignored; the original error is reported below
        }
        logger.error("Error while checking compliance.", e);
        this.logIt("Error while checking compliance: " + e.getMessage(), 2);
        this.status = Status.FAILURE;
        return;
    } finally {
        session.close();
    }
}

From source file:onl.netfishers.netshot.work.tasks.CheckGroupSoftwareTask.java

License:Open Source License

/**
 * Re-evaluates software compliance level and hardware support status for
 * every device of the group, applying software rules in priority order.
 */
@Override
public void run() {
    logger.debug("Starting check software compliance and hardware support status task for group {}.",
            deviceGroup.getId());
    this.logIt(String.format("Check software compliance task for group %s.", deviceGroup.getName()), 5);

    Session session = Database.getSession();
    try {
        logger.debug("Retrieving the software rules");
        @SuppressWarnings("unchecked")
        List<SoftwareRule> softwareRules = session.createCriteria(SoftwareRule.class)
                .addOrder(Property.forName("priority").asc()).list();
        logger.debug("Retrieving the hardware rules");
        @SuppressWarnings("unchecked")
        List<HardwareRule> hardwareRules = session.createCriteria(HardwareRule.class).list();

        session.beginTransaction();
        ScrollableResults devices = session
                .createQuery("select d from DeviceGroup g join g.cachedDevices d where g.id = :id")
                .setLong("id", deviceGroup.getId()).setCacheMode(CacheMode.IGNORE)
                .scroll(ScrollMode.FORWARD_ONLY);
        // Fix: close the scrollable cursor on every exit path
        try {
            while (devices.next()) {
                Device device = (Device) devices.get(0);
                device.setSoftwareLevel(ConformanceLevel.UNKNOWN);
                // Rules are ordered by priority; stop at the first that
                // assigns a conformance level
                for (SoftwareRule rule : softwareRules) {
                    rule.check(device);
                    if (device.getSoftwareLevel() != ConformanceLevel.UNKNOWN) {
                        break;
                    }
                }
                device.resetEoX();
                for (HardwareRule rule : hardwareRules) {
                    rule.check(device);
                }
                session.save(device);
                // Flush and evict per device to keep the session small
                session.flush();
                session.evict(device);
            }
        } finally {
            devices.close();
        }
        session.getTransaction().commit();
        this.status = Status.SUCCESS;
    } catch (Exception e) {
        try {
            session.getTransaction().rollback();
        } catch (Exception e1) {
            // Rollback failure is ignored; the original error is reported below
        }
        logger.error("Error while checking compliance.", e);
        this.logIt("Error while checking compliance: " + e.getMessage(), 2);
        this.status = Status.FAILURE;
        return;
    } finally {
        session.close();
    }
}

From source file:onl.netfishers.netshot.work.tasks.PurgeDatabaseTask.java

License:Open Source License

@Override
public void run() {
    // Purges old finished tasks and, when configured, old device configurations.
    logger.debug("Starting cleanup process.");

    // Phase 1: delete finished (cancelled / failed / successful) tasks older
    // than 'days' days.
    {
        Session session = Database.getSession();
        try {
            session.beginTransaction();
            logger.trace("Cleaning up tasks finished more than {} days ago...", days);
            this.logIt(String.format("Cleaning up tasks more than %d days ago...", days), 5);
            Calendar when = Calendar.getInstance();
            when.add(Calendar.DATE, -1 * days);
            ScrollableResults tasks = session
                    .createQuery("from Task t where (t.status = :cancelled or t.status = :failure "
                            + "or t.status = :success) and (t.executionDate < :when)")
                    .setParameter("cancelled", Task.Status.CANCELLED)
                    .setParameter("failure", Task.Status.FAILURE).setParameter("success", Task.Status.SUCCESS)
                    .setDate("when", when.getTime()).setCacheMode(CacheMode.IGNORE)
                    .scroll(ScrollMode.FORWARD_ONLY);
            int count = 0;
            // NOTE(review): this scrollable cursor is never closed — consider a
            // try/finally around the loop
            while (tasks.next()) {
                Task task = (Task) tasks.get(0);
                session.delete(task);
                // Flush and clear every 50 deletions to bound session memory
                if (++count % 50 == 0) {
                    session.flush();
                    session.clear();
                }
            }
            session.getTransaction().commit();
            logger.trace("Cleaning up done on tasks, {} entries affected.", count);
            this.logIt(String.format("Cleaning up done on tasks, %d entries affected.", count), 5);
        } catch (HibernateException e) {
            try {
                session.getTransaction().rollback();
            } catch (Exception e1) {
                // Rollback failure is ignored; the original error is reported below
            }
            logger.error("Database error while purging the old tasks from the database.", e);
            this.logIt("Database error during the task purge.", 1);
            this.status = Status.FAILURE;
            return;
        } catch (Exception e) {
            try {
                session.getTransaction().rollback();
            } catch (Exception e1) {
                // Rollback failure is ignored; the original error is reported below
            }
            logger.error("Error while purging the old tasks from the database.", e);
            this.logIt("Error during the task purge.", 1);
            this.status = Status.FAILURE;
            return;
        } finally {
            session.close();
        }
    }

    // Phase 2: delete old configurations, only when configDays is configured.
    if (configDays > 0) {
        Session session = Database.getSession();
        try {
            session.beginTransaction();
            logger.trace("Cleaning up configurations taken more than {} days ago...", configDays);
            this.logIt(String.format("Cleaning up configurations older than %d days...", configDays), 5);
            Calendar when = Calendar.getInstance();
            when.add(Calendar.DATE, -1 * configDays);
            Query query;
            if (configSize > 0) {
                // Only target configs whose largest long-text attribute exceeds
                // configSize KiB
                query = session.createQuery(
                        "select c from Config c join c.attributes a where (a.class = ConfigLongTextAttribute) group by c.id having (max(length(a.longText.text)) > :size) and (c.changeDate < :when) order by c.device asc, c.changeDate desc")
                        .setInteger("size", configSize * 1024);
            } else {
                query = session.createQuery(
                        "from Config c where (c.changeDate < :when) order by c.device asc, c.changeDate desc");
            }
            // NOTE(review): this scrollable cursor is also never closed
            ScrollableResults configs = query.setCalendar("when", when).setCacheMode(CacheMode.IGNORE)
                    .scroll(ScrollMode.FORWARD_ONLY);
            // Results are ordered by device, then by change date descending;
            // these trackers implement a per-device retention window
            long dontDeleteDevice = -1;
            Date dontDeleteBefore = null;
            int count = 0;
            while (configs.next()) {
                try {
                    Config config = (Config) configs.get(0);
                    // Keep the config when: it is the device's current config, OR it
                    // falls inside the retention window, OR it is the first (newest)
                    // config seen for a new device while configKeepDays is enabled
                    if ((config.getDevice().getLastConfig() != null
                            && config.getDevice().getLastConfig().getId() == config.getId())
                            || (dontDeleteBefore != null && config.getChangeDate().before(dontDeleteBefore))
                            || (configKeepDays > 0 && dontDeleteDevice != config.getDevice().getId())) {
                        if (configKeepDays > 0) {
                            // Slide the window: protect configs changed within
                            // configKeepDays before this kept config's change date
                            Calendar limitCalendar = Calendar.getInstance();
                            limitCalendar.setTime(config.getChangeDate());
                            limitCalendar.add(Calendar.DATE, -1 * configKeepDays);
                            dontDeleteBefore = limitCalendar.getTime();
                        }
                    } else {
                        session.delete(config);
                        // Flush and clear every 30 deletions to bound session memory
                        if (++count % 30 == 0) {
                            session.flush();
                            session.clear();
                        }
                    }
                    dontDeleteDevice = config.getDevice().getId();
                } catch (NullPointerException e1) {
                    // NOTE(review): NPEs (e.g. a config with no device) are deliberately
                    // skipped so one broken row does not abort the purge — confirm intent
                }
            }
            session.getTransaction().commit();
            logger.trace("Cleaning up done on configurations, {} entries affected.", count);
            this.logIt(String.format("Cleaning up done on configurations, %d entries affected.", count), 5);
        } catch (HibernateException e) {
            try {
                session.getTransaction().rollback();
            } catch (Exception e1) {
                // Rollback failure is ignored; the original error is reported below
            }
            logger.error("Database error while purging the old configurations from the database.", e);
            this.logIt("Database error during the configuration purge.", 1);
            this.status = Status.FAILURE;
            return;
        } catch (Exception e) {
            try {
                session.getTransaction().rollback();
            } catch (Exception e1) {
                // Rollback failure is ignored; the original error is reported below
            }
            logger.error("Error while purging the old configurations from the database.", e);
            this.logIt("Error during the configuration purge.", 1);
            this.status = Status.FAILURE;
            return;
        } finally {
            session.close();
        }
    }

    this.status = Status.SUCCESS;
    logger.trace("Cleaning up process finished.");
}

From source file:org.apereo.portal.events.handlers.db.JpaPortalEventStore.java

License:Apache License

/**
 * Streams unaggregated portal events in the given window to the handler,
 * marking each event as aggregated (or as erred so it is not retried) and
 * periodically flushing/clearing the session to bound memory usage.
 *
 * @return false when the handler requested aggregation to stop, true when
 *         all selected events were processed
 */
@Override
@RawEventsTransactional
public boolean aggregatePortalEvents(DateTime startTime, DateTime endTime, int maxEvents,
        Function<PortalEvent, Boolean> handler) {
    final Session session = this.getEntityManager().unwrap(Session.class);
    session.setFlushMode(FlushMode.COMMIT);
    final org.hibernate.Query query = session.createQuery(this.selectUnaggregatedQuery);
    query.setParameter(this.startTimeParameter.getName(), startTime);
    query.setParameter(this.endTimeParameter.getName(), endTime);
    if (maxEvents > 0) {
        query.setMaxResults(maxEvents);
    }

    int resultCount = 0;
    final ScrollableResults results = query.scroll(ScrollMode.FORWARD_ONLY);
    // Fix: close the scrollable cursor on every exit path (the original
    // leaked it on the early "stop requested" return)
    try {
        while (results.next()) {
            final PersistentPortalEvent persistentPortalEvent = (PersistentPortalEvent) results.get(0);
            final PortalEvent portalEvent;
            try {
                portalEvent = this.toPortalEvent(persistentPortalEvent.getEventData(),
                        persistentPortalEvent.getEventType());
            } catch (RuntimeException e) {
                this.logger.warn(
                        "Failed to convert PersistentPortalEvent to PortalEvent: " + persistentPortalEvent, e);

                // Mark the event as error and store the mark to prevent trying to
                // reprocess the broken event data
                persistentPortalEvent.setErrorAggregating(true);
                session.persist(persistentPortalEvent);

                continue;
            }

            try {
                final Boolean eventHandled = handler.apply(portalEvent);
                // Fix: null-safe unboxing — a null handler result is treated as a
                // stop request instead of throwing an NPE mid-aggregation
                if (!Boolean.TRUE.equals(eventHandled)) {
                    this.logger.debug("Aggregation stop requested before processing event {}", portalEvent);
                    return false;
                }

                // Mark the event as aggregated and store the mark
                persistentPortalEvent.setAggregated(true);
                session.persist(persistentPortalEvent);

                // Periodic flush and clear of session to manage memory demands
                if (++resultCount % this.flushPeriod == 0) {
                    this.logger.debug("Aggregated {} events, flush and clear {} EntityManager.", resultCount,
                            PERSISTENCE_UNIT_NAME);
                    session.flush();
                    session.clear();
                }

            } catch (Exception e) {
                this.logger.warn("Failed to aggregate portal event: " + persistentPortalEvent, e);
                // Mark the event as erred and move on; it will not be picked up
                // by processing again
                persistentPortalEvent.setErrorAggregating(true);
                session.persist(persistentPortalEvent);
            }
        }
    } finally {
        results.close();
    }

    return true;
}

From source file:org.apereo.portal.events.handlers.db.JpaPortalEventStore.java

License:Apache License

/**
 * Streams portal events in the given window to the handler without
 * aggregating them, evicting each entity to keep the session small.
 */
@Override
public void getPortalEvents(DateTime startTime, DateTime endTime, int maxEvents,
        FunctionWithoutResult<PortalEvent> handler) {
    final Session session = this.getEntityManager().unwrap(Session.class);
    final org.hibernate.Query query = session.createQuery(this.selectQuery);
    query.setParameter(this.startTimeParameter.getName(), startTime);
    query.setParameter(this.endTimeParameter.getName(), endTime);
    if (maxEvents > 0) {
        query.setMaxResults(maxEvents);
    }

    final ScrollableResults results = query.scroll(ScrollMode.FORWARD_ONLY);
    // Fix: close the scrollable cursor when iteration ends or fails
    try {
        while (results.next()) {
            final PersistentPortalEvent persistentPortalEvent = (PersistentPortalEvent) results.get(0);
            final PortalEvent portalEvent = this.toPortalEvent(persistentPortalEvent.getEventData(),
                    persistentPortalEvent.getEventType());
            handler.apply(portalEvent);
            persistentPortalEvent.setAggregated(true);
            // Evicted, not persisted: this read path does not save the flag
            session.evict(persistentPortalEvent);
        }
    } finally {
        results.close();
    }
}