Example usage for org.hibernate LockMode PESSIMISTIC_WRITE

List of usage examples for org.hibernate LockMode PESSIMISTIC_WRITE

Introduction

On this page you can find example usage of org.hibernate LockMode PESSIMISTIC_WRITE.

Prototype

LockMode PESSIMISTIC_WRITE

To view the source code for org.hibernate LockMode PESSIMISTIC_WRITE, click the Source Link.

Document

Transaction will obtain a database lock immediately.
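
For orientation before the examples below, here is a minimal sketch of acquiring this lock mode directly on a Hibernate Session. The Account entity, its identifier value, and the sessionFactory are hypothetical placeholders, not taken from the examples on this page.

// Uses org.hibernate.Session, Transaction, LockOptions and LockMode.
public Account loadForUpdate(SessionFactory sessionFactory) {
    // Open a session and a transaction; the row lock is released when the
    // transaction commits or rolls back.
    Session session = sessionFactory.openSession();
    Transaction tx = session.beginTransaction();
    try {
        // Load the row with "select ... for update" semantics (dialect permitting).
        Account account = (Account) session.get(Account.class, 1L,
                new LockOptions(LockMode.PESSIMISTIC_WRITE));
        // ... modify the locked row here ...
        tx.commit();
        return account;
    } catch (RuntimeException e) {
        tx.rollback();
        throw e;
    } finally {
        session.close();
    }
}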

Usage

From source file:com.emergya.persistenceGeo.dao.impl.GenericHibernateDAOImpl.java

License:Open Source License

public T findById(Long id, boolean lock) {
    T entity;
    if (lock) {
        entity = (T) getHibernateTemplate().get(persistentClass, id, LockMode.PESSIMISTIC_WRITE);
    } else {
        entity = (T) getHibernateTemplate().get(persistentClass, id);
    }

    return entity;
}

From source file:com.evolveum.midpoint.repo.sql.helpers.ObjectRetriever.java

License:Apache License

public <T extends ObjectType> PrismObject<T> getObjectInternal(Session session, Class<T> type, String oid,
        Collection<SelectorOptions<GetOperationOptions>> options, boolean lockForUpdate,
        OperationResult operationResult)
        throws ObjectNotFoundException, SchemaException, DtoTranslationException {

    boolean lockedForUpdateViaHibernate = false;
    boolean lockedForUpdateViaSql = false;

    LockOptions lockOptions = new LockOptions();
    //todo fix lock for update!!!!!
    if (lockForUpdate) {
        if (getConfiguration().isLockForUpdateViaHibernate()) {
            lockOptions.setLockMode(LockMode.PESSIMISTIC_WRITE);
            lockedForUpdateViaHibernate = true;
        } else if (getConfiguration().isLockForUpdateViaSql()) {
            LOGGER.trace("Trying to lock object {} for update (via SQL)", oid);
            long time = System.currentTimeMillis();
            SQLQuery q = session.createSQLQuery("select oid from m_object where oid = ? for update");
            q.setString(0, oid);
            Object result = q.uniqueResult();
            if (result == null) {
                return throwObjectNotFoundException(type, oid);
            }
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("Locked via SQL (in {} ms)", System.currentTimeMillis() - time);
            }
            lockedForUpdateViaSql = true;
        }
    }

    if (LOGGER.isTraceEnabled()) {
        if (lockedForUpdateViaHibernate) {
            LOGGER.trace("Getting object {} with locking for update (via hibernate)", oid);
        } else if (lockedForUpdateViaSql) {
            LOGGER.trace("Getting object {}, already locked for update (via SQL)", oid);
        } else {
            LOGGER.trace("Getting object {} without locking for update", oid);
        }
    }

    GetObjectResult fullObject = null;
    if (!lockForUpdate) {
        Query query = session.getNamedQuery("get.object");
        query.setString("oid", oid);
        query.setResultTransformer(GetObjectResult.RESULT_TRANSFORMER);
        query.setLockOptions(lockOptions);

        fullObject = (GetObjectResult) query.uniqueResult();
    } else {
        // we're doing update after this get, therefore we load full object right now
        // (it would be loaded during merge anyway)
        // this just loads object to hibernate session, probably will be removed later. Merge after this get
        // will be faster. Read and use object only from fullObject column.
        // todo remove this later [lazyman]
        Criteria criteria = session.createCriteria(ClassMapper.getHQLTypeClass(type));
        criteria.add(Restrictions.eq("oid", oid));

        criteria.setLockMode(lockOptions.getLockMode());
        RObject obj = (RObject) criteria.uniqueResult();

        if (obj != null) {
            obj.toJAXB(prismContext, null).asPrismObject();
            fullObject = new GetObjectResult(obj.getFullObject(), obj.getStringsCount(), obj.getLongsCount(),
                    obj.getDatesCount(), obj.getReferencesCount(), obj.getPolysCount(), obj.getBooleansCount());
        }
    }

    LOGGER.trace("Got it.");
    if (fullObject == null) {
        throwObjectNotFoundException(type, oid);
    }

    LOGGER.trace("Transforming data to JAXB type.");
    PrismObject<T> prismObject = updateLoadedObject(fullObject, type, oid, options, session, operationResult);
    validateObjectType(prismObject, type);

    // this was implemented to allow report parsing errors as warnings to upper layers;
    // however, it causes problems when serialization problems are encountered: in such cases, we put
    // FATAL_ERROR to the result here, and it should be then removed or muted (which is a complication)
    // -- so, as the parsing errors are not implemented, we disabled this code as well

    //         subResult.computeStatusIfUnknown();
    //         if (subResult.isWarning() || subResult.isError() || subResult.isInProgress()) {
    //            prismObject.asObjectable().setFetchResult(subResult.createOperationResultType());
    //         }

    return prismObject;
}

From source file:com.evolveum.midpoint.repo.sql.SqlRepositoryServiceImpl.java

License:Apache License

private <T extends ObjectType> PrismObject<T> getObject(Session session, Class<T> type, String oid,
        Collection<SelectorOptions<GetOperationOptions>> options, boolean lockForUpdate)
        throws ObjectNotFoundException, SchemaException, DtoTranslationException, QueryException {

    boolean lockedForUpdateViaHibernate = false;
    boolean lockedForUpdateViaSql = false;

    LockOptions lockOptions = new LockOptions();
    //todo fix lock for update!!!!!
    if (lockForUpdate) {
        if (getConfiguration().isLockForUpdateViaHibernate()) {
            lockOptions.setLockMode(LockMode.PESSIMISTIC_WRITE);
            lockedForUpdateViaHibernate = true;
        } else if (getConfiguration().isLockForUpdateViaSql()) {
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("Trying to lock object " + oid + " for update (via SQL)");
            }
            long time = System.currentTimeMillis();
            SQLQuery q = session.createSQLQuery("select oid from m_object where oid = ? for update");
            q.setString(0, oid);
            Object result = q.uniqueResult();
            if (result == null) {
                return throwObjectNotFoundException(type, oid);
            }
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("Locked via SQL (in " + (System.currentTimeMillis() - time) + " ms)");
            }
            lockedForUpdateViaSql = true;
        }
    }

    if (LOGGER.isTraceEnabled()) {
        if (lockedForUpdateViaHibernate) {
            LOGGER.trace("Getting object " + oid + " with locking for update (via hibernate)");
        } else if (lockedForUpdateViaSql) {
            LOGGER.trace("Getting object " + oid + ", already locked for update (via SQL)");
        } else {
            LOGGER.trace("Getting object " + oid + " without locking for update");
        }
    }

    GetObjectResult fullObject = null;
    if (!lockForUpdate) {
        Query query = session.getNamedQuery("get.object");
        query.setString("oid", oid);
        query.setResultTransformer(GetObjectResult.RESULT_TRANSFORMER);
        query.setLockOptions(lockOptions);

        fullObject = (GetObjectResult) query.uniqueResult();
    } else {
        // we're doing update after this get, therefore we load full object right now
        // (it would be loaded during merge anyway)
        // this just loads object to hibernate session, probably will be removed later. Merge after this get
        // will be faster. Read and use object only from fullObject column.
        // todo remove this later [lazyman]
        Criteria criteria = session.createCriteria(ClassMapper.getHQLTypeClass(type));
        criteria.add(Restrictions.eq("oid", oid));

        criteria.setLockMode(lockOptions.getLockMode());
        RObject obj = (RObject) criteria.uniqueResult();

        if (obj != null) {
            obj.toJAXB(getPrismContext(), null).asPrismObject();
            fullObject = new GetObjectResult(obj.getFullObject(), obj.getStringsCount(), obj.getLongsCount(),
                    obj.getDatesCount(), obj.getReferencesCount(), obj.getPolysCount());
        }
    }

    LOGGER.trace("Got it.");
    if (fullObject == null) {
        throwObjectNotFoundException(type, oid);
    }

    LOGGER.trace("Transforming data to JAXB type.");
    PrismObject<T> prismObject = updateLoadedObject(fullObject, type, options, session);
    validateObjectType(prismObject, type);

    return prismObject;
}

From source file:com.lighting.platform.base.dao.SimpleHibernateDao.java

License:Apache License

/**
 * Acquires a pessimistic lock on the given entity, i.e. select ... for update.
 * Must be invoked within a transaction; the lock is held until the transaction ends.
 */
public void pessimisticLock(T entity) {
    LockOptions option = new LockOptions(LockMode.PESSIMISTIC_WRITE);
    getSession().buildLockRequest(option).lock(entity);
}
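
A brief usage sketch follows. The surrounding Spring transaction, the orderDao field, and the Order entity are assumptions for illustration; the row lock taken by pessimisticLock() only lasts until the enclosing transaction ends.

@Transactional
public void markProcessing(Long orderId) {
    // Hypothetical caller: load the entity, then lock its row via
    // select ... for update so no concurrent transaction can modify it
    // until this transaction commits.
    Order order = orderDao.get(orderId);   // hypothetical finder on the DAO
    orderDao.pessimisticLock(order);
    order.setStatus("PROCESSING");
}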

From source file:com.literatejava.hibernate.allocator.LinearBlockAllocator.java

License:Open Source License

public void configure(Type type, Properties params, Dialect dialect) {
    ObjectNameNormalizer normalizer = (ObjectNameNormalizer) params.get(IDENTIFIER_NORMALIZER);

    this.tableName = ConfigurationHelper.getString(ALLOC_TABLE, params, DEFAULT_TABLE);
    this.sequenceColumn = ConfigurationHelper.getString(SEQUENCE_COLUMN, params, DEFAULT_SEQUENCE_COLUMN);
    this.allocColumn = ConfigurationHelper.getString(ALLOC_COLUMN, params, DEFAULT_ALLOC_COLUMN);

    // get SequenceName;    default to Entities' TableName.
    //      --
    this.sequenceName = ConfigurationHelper.getString(SEQUENCE_NAME, params,
            params.getProperty(PersistentIdentifierGenerator.TABLE));
    if (sequenceName == null) {
        throw new IdentifierGenerationException(
                "LinearBlockAllocator: '" + SEQUENCE_NAME + "' must be specified");
    }

    this.blockSize = ConfigurationHelper.getInt(BLOCK_SIZE, params, DEFAULT_BLOCK_SIZE);
    if (blockSize < 1) {
        blockSize = 1;
    }

    // qualify Table Name, if necessary;
    //      --
    if (tableName.indexOf('.') < 0) {
        String schemaName = normalizer.normalizeIdentifierQuoting(params.getProperty(SCHEMA));
        String catalogName = normalizer.normalizeIdentifierQuoting(params.getProperty(CATALOG));
        this.tableName = Table.qualify(catalogName, schemaName, tableName);
    }

    // build SQL strings;
    //      -- use appendLockHint(LockMode) for now, as that is how Hibernate's generators do it.
    //
    this.query = "select " + allocColumn + " from "
            + dialect.appendLockHint(LockMode.PESSIMISTIC_WRITE, tableName) + " where " + sequenceColumn
            + " = ?" + dialect.getForUpdateString();
    this.update = "update " + tableName + " set " + allocColumn + " = ? where " + sequenceColumn + " = ? and "
            + allocColumn + " = ?";

    // setup Return Type & Result Holder.
    //      --
    this.returnType = type;
    this.returnClass = type.getReturnedClass();
    this.resultFactory = IdentifierGeneratorHelper.getIntegralDataTypeHolder(returnClass);

    // done.
}

From source file:com.mysema.query.HibernateBase.java

License:Apache License

@Test
public void LockMode() {
    query().from(QCat.cat).setLockMode(QCat.cat, LockMode.PESSIMISTIC_WRITE).list(QCat.cat);
}

From source file:com.querydsl.jpa.HibernateBase.java

License:Apache License

@Test
public void LockMode() {
    query().from(cat).setLockMode(cat, LockMode.PESSIMISTIC_WRITE).select(cat).fetch();
}

From source file:com.sam.moca.dao.hibernate.AbstractUnknownKeyHibernateDAO.java

License:Open Source License

/**
 * This will lock the row for the given object and refresh it at the same time.  Note that any
 * child objects may be stale and, if needed, this object
 * should be reread after locking.
 * Depending on the database provider, there is no guarantee that the
 * wait argument is honored.
 * @param obj The object to lock the row on
 * @param wait Whether to wait for the lock or timeout
 * @return whether or not the lock was obtained
 */
public boolean lockRow(T obj, boolean wait) {
    try {
        LockOptions opts = new LockOptions();
        if (wait) {
            opts.setLockMode(LockMode.PESSIMISTIC_WRITE);
            opts.setTimeOut(LockOptions.WAIT_FOREVER);
        } else {
            opts.setLockMode(LockMode.UPGRADE_NOWAIT);
            opts.setTimeOut(LockOptions.NO_WAIT);
        }
        HibernateTools.getSession().refresh(obj, opts);
        return true;
    } catch (LockAcquisitionException e) {
        return false;
    }
}
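
A caller can use the boolean result to attempt a non-blocking lock first and only wait if the row is busy. This is an illustrative sketch; dao and row are hypothetical names.

// Try to lock without waiting; if another transaction holds the lock,
// fall back to a blocking lock (WAIT_FOREVER).
if (!dao.lockRow(row, false)) {
    dao.lockRow(row, true);
}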

From source file:de.innovationgate.webgate.api.jdbc.WGDatabaseImpl.java

License:Open Source License

private Long upgradeFileStorage(Logger log) throws WGAPIException {

    _ugradeFileStorageRunning = true;
    try {

        if (_csVersion.getVersion() < 5.0 || _csVersion.getPatchLevel() < 4) {
            log.error("This task needs a content store of version 5.0 patch level 4 or higher");
            return 0L;
        }

        CS5P4FileHandling fileHandling = ((CS5P4FileHandling) _fileHandling);
        long freedMemory = 0;

        while (true) {
            String metaHql = "from ContentFileMeta as meta where meta.checksumSha512 is null";
            Iterator oldMetas = getSession().createQuery(metaHql).iterate();
            try {
                int counter = 0;
                if (!oldMetas.hasNext()) {
                    break;
                }

                while (oldMetas.hasNext() && counter < 100) {
                    ContentFileMeta meta = (ContentFileMeta) oldMetas.next();
                    getSession().setReadOnly(meta, false);
                    LockRequest lockRequest = getSession()
                            .buildLockRequest(new LockOptions(LockMode.PESSIMISTIC_WRITE));
                    lockRequest.lock(meta);

                    try {

                        // Just-for-sure check if this is really not yet migrated
                        getSession().refresh(meta);
                        if (meta.getChecksumSha512() != null) {
                            rollbackHibernateTransaction(false);
                            continue;
                        }

                        log.info("Database: " + getDb().getDbReference() + ": Upgrading storage of file '"
                                + meta.getName() + "' from document '" + meta.getParentcontent().getTitle()
                                + "' (" + meta.getParentcontent().getStructentry().getKey() + "."
                                + meta.getParentcontent().getLanguage().getName() + "."
                                + meta.getParentcontent().getVersion() + ")");

                        // Select file parts
                        String hqlQuery = "select cfp from ContentFilePart as cfp where cfp.meta=:metaEntity order by cfp.partnr asc";
                        Query query = getSession().createQuery(hqlQuery);
                        query.setParameter("metaEntity", meta);

                        // Migrate file parts to filecontents parts
                        InputStream in = new HibernateQueryInputStream(fileHandling.getParent().getSession(),
                                query, 0, isOptimizedFileHandlingDisableQueryPaging());
                        try {
                            fileHandling.storeFileContents(meta, new CS5P4ContentFileDescriptor(), in);
                        } finally {
                            in.close();
                        }

                        // Delete file parts
                        Query deletionQuery = getSession()
                                .createQuery("delete ContentFilePart cfp where cfp.meta = :meta");
                        deletionQuery.setEntity("meta", meta);
                        deletionQuery.executeUpdate();

                        // Commit so we can read the file afterwards
                        commitHibernateTransaction();

                        /*
                        // Annotate the file
                        WGDocumentImpl doc = createDocumentImpl(meta.getParentcontent());
                        TemporaryFile tempFile = new TemporaryFile(meta.getName(), doc.getFileData(meta.getName()), WGFactory.getTempDir());
                        try {
                        WGFileMetaData md = new WGFileMetaData(new WGDocument.FakeMetaDataContext(), meta.getName(), meta.getSize(), meta.getCreated(), meta.getLastmodified(), meta.getChecksum(), meta.getChecksumSha512(), fileHandling.loadMdExtensionData(doc, meta));
                        _db.annotateMetadata(tempFile.getFile(), md, null);
                        fileHandling.storeMdExtensionData(doc, md, meta);
                        if (isSaveIsolationActive()) {
                            getSession().update(meta); // This will not be able to store binary extension data, which however cannot be present before upgrading the file storage
                        }
                        }
                        finally {
                        tempFile.delete();
                        }
                        commitHibernateTransaction();
                        */
                    } catch (Throwable e) {
                        log.error("Exception upgrading file", e);
                        rollbackHibernateTransaction(false);
                    }
                    counter++;
                }

                log.info("Clearing session cache");
                refresh();
                log.info("Running file storage maintenance to remove duplicate file data");
                freedMemory += dailyMaintenance(log);
            } finally {
                Hibernate.close(oldMetas);
            }
        }

        log.info("Database: " + getDb().getDbReference() + ": Upgrading file storage freed "
                + WGUtils.DECIMALFORMAT_STANDARD.format(freedMemory / 1024 / 1024)
                + " MB of file storage memory.");
        return freedMemory;

    } finally {
        _ugradeFileStorageRunning = false;
    }

}

From source file:edu.utah.further.core.data.service.DaoHibernateImpl.java

License:Apache License

/**
 * Re-read the state of the given persistent instance.
 *
 * @param domain
 *            the persistent instance to re-read
 * @see edu.utah.further.core.api.data.Dao#refresh(edu.utah.further.core.api.data.PersistentEntity)
 */
@Override
public <T extends PersistentEntity<?>> void refresh(final T domain) {
    getHibernateTemplate().refresh(domain, LockMode.PESSIMISTIC_WRITE);
}