Example usage for org.springframework.transaction.annotation Propagation REQUIRES_NEW

List of usage examples for org.springframework.transaction.annotation Propagation REQUIRES_NEW

Introduction

On this page you can find example usages of org.springframework.transaction.annotation Propagation REQUIRES_NEW.

Prototype

Propagation REQUIRES_NEW

To view the source code for org.springframework.transaction.annotation Propagation REQUIRES_NEW, click the Source Link.

Document

Create a new transaction, and suspend the current transaction if one exists.
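To make these semantics concrete, here is a minimal sketch (the TransferService and AuditService names and methods are hypothetical, not taken from the examples below). When transfer() calls writeAuditRecord() through the Spring proxy, the outer transaction is suspended, the audit write commits or rolls back in its own transaction, and the outer transaction then resumes. Note that the REQUIRES_NEW method must be invoked on a different bean; a self-invocation would bypass the proxy and the annotation would have no effect.

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;

@Service
public class TransferService {

    @Autowired
    private AuditService auditService;

    @Transactional // default REQUIRED: joins or starts transaction T1
    public void transfer() {
        // ... business logic running in T1 ...
        // T1 is suspended while the audit record is written in its own transaction T2
        auditService.writeAuditRecord();
        // T1 resumes here, regardless of whether T2 committed or rolled back
    }
}

@Service
public class AuditService {

    @Transactional(propagation = Propagation.REQUIRES_NEW)
    public void writeAuditRecord() {
        // runs in a new transaction T2 even though transfer() already has one;
        // a rollback of T2 does not roll back the suspended T1
    }
}

Keep in mind that an exception thrown from writeAuditRecord() still propagates to transfer(); unless it is caught there, the outer transaction will roll back as well.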

Usage

From source file:cn.org.once.cstack.service.impl.MessageServiceImpl.java

@Override
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void delete(Message message) throws ServiceException {
    try {
        messageDAO.delete(message);
    } catch (PersistenceException e) {
        throw new ServiceException(e.getLocalizedMessage(), e);
    }
}
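Because delete(...) runs in its own transaction, a caller that is itself transactional can catch the ServiceException and carry on: only the inner transaction is rolled back. A minimal caller sketch (cleanUp, messageService, and the log field are hypothetical, and the call must cross a bean boundary so the proxy applies the REQUIRES_NEW semantics):

@Transactional
public void cleanUp(List<Message> messages) {
    for (Message message : messages) {
        try {
            messageService.delete(message); // each delete commits or rolls back on its own
        } catch (ServiceException e) {
            // the failed delete already rolled back; this transaction keeps going
            log.warn("Could not delete message", e);
        }
    }
}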

From source file:be.peerassistedlearning.repository.RoomRepositoryTest.java

@Test
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void testUpdate() {
    Room r1 = new Room("2.25", Campus.PROXIMUS, RoomType.COMPUTER);

    repository.save(r1);

    r1.setName("2.26");

    repository.save(r1);

    Room r2 = repository.findOne(r1.getId());

    assertEquals("2.26", r2.getName());
}

From source file:be.peerassistedlearning.repository.CourseRepositoryTest.java

@Test
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void testUpdate() {
    Course c1 = new Course("MBI80x", ".NET Programmeren", ".NET", Curriculum.TI, 3);

    repository.save(c1);

    c1.setName(".NET Programmeren in Visual Studio");

    repository.save(c1);

    Course c2 = repository.findOne(c1.getId());

    assertEquals(".NET Programmeren in Visual Studio", c2.getName());
}

From source file:com.sonoport.service.account.jpa.UserServiceImpl.java

@Override
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void delete(User user) {
    // EclipseLink JPA requires a merge() before a remove()
    User toDelete = entityManager.merge(user);
    entityManager.remove(toDelete);
}
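The merge() is needed because the incoming User is typically detached, and remove() only accepts a managed instance. When only the identifier is at hand, a lighter variant (deleteById is a hypothetical addition, using the standard JPA getReference API) avoids loading the full entity state:

@Transactional(propagation = Propagation.REQUIRES_NEW)
public void deleteById(Long id) {
    // getReference returns a managed proxy without necessarily hitting the database;
    // remove() then schedules the DELETE within this new transaction
    User reference = entityManager.getReference(User.class, id);
    entityManager.remove(reference);
}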

From source file:org.schedoscope.metascope.task.MetastoreTask.java

@Override
@Transactional(propagation = Propagation.REQUIRES_NEW)
public boolean run(long start) {
    LOG.info("Sync repository with metastore");
    HiveConf conf = new HiveConf();
    conf.set("hive.metastore.local", "false");
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, config.getMetastoreThriftUri());
    String principal = config.getKerberosPrincipal();
    if (principal != null && !principal.isEmpty()) {
        conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
        conf.setVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, principal);
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
    }

    HiveMetaStoreClient client = null;
    try {
        client = new HiveMetaStoreClient(conf);
    } catch (Exception e) {
        LOG.error("[MetastoreSyncTask] FAILED: Could not connect to hive metastore", e);
        return false;
    }

    FileSystem fs;
    try {
        Configuration hadoopConfig = new Configuration();
        hadoopConfig.set("fs.defaultFS", config.getHdfs());
        fs = FileSystem.get(hadoopConfig);
    } catch (IOException e) {
        LOG.error("[MetastoreSyncTask] FAILED: Could not connect to HDFS", e);
        client.close();
        return false;
    }

    LOG.info("Connected to metastore (" + config.getMetastoreThriftUri() + ")");

    List<String> allTables = metascopeTableRepository.getAllTablesNames();

    for (String fqdn : allTables) {
        // load table
        MetascopeTable table = metascopeTableRepository.findOne(fqdn);
        LOG.info("Get metastore information for table " + table.getFqdn());

        try {
            Table mTable = client.getTable(table.getDatabaseName(), table.getTableName());
            List<Partition> partitions = client.listPartitions(table.getDatabaseName(), table.getTableName(),
                    Short.MAX_VALUE);

            table.setTableOwner(mTable.getOwner());
            table.setCreatedAt(mTable.getCreateTime() * 1000L);
            table.setInputFormat(mTable.getSd().getInputFormat());
            table.setOutputFormat(mTable.getSd().getOutputFormat());
            table.setDataPath(mTable.getSd().getLocation());
            try {
                table.setDataSize(getDirectorySize(fs, table.getDataPath()));
                table.setPermissions(getPermission(fs, table.getDataPath()));
            } catch (IllegalArgumentException e) {
                LOG.warn("Could not retrieve dir size: " + e.getMessage());
                LOG.debug("ERROR: Could not read HDFS metadata", e);
            }

            long maxLastTransformation = -1;

            Hibernate.initialize(table.getViews());
            table.setViewsSize(table.getViews().size());

            for (Partition partition : partitions) {
                MetascopeView view = getView(table.getViews(), partition);
                if (view == null) {
                    // a view that is not registered as a partition in the Hive metastore should not exist; skip it
                    continue;
                }
                String numRows = partition.getParameters().get("numRows");
                if (numRows != null) {
                    view.setNumRows(Long.parseLong(numRows));
                }
                String totalSize = partition.getParameters().get("totalSize");
                if (totalSize != null) {
                    view.setTotalSize(Long.parseLong(totalSize));
                }
                String lastTransformation = partition.getParameters().get(SCHEDOSCOPE_TRANSFORMATION_TIMESTAMP);
                if (lastTransformation != null) {
                    long ts = Long.parseLong(lastTransformation);
                    view.setLastTransformation(ts);
                    if (ts > maxLastTransformation) {
                        maxLastTransformation = ts;
                    }
                }
                solrFacade.updateViewEntity(view, false);
            }

            if (maxLastTransformation != -1) {
                table.setLastTransformation(maxLastTransformation);
            } else {
                String ts = mTable.getParameters().get(SCHEDOSCOPE_TRANSFORMATION_TIMESTAMP);
                if (ts != null) {
                    long lastTransformationTs = Long.parseLong(ts);
                    table.setLastTransformation(lastTransformationTs);
                    MetascopeView rootView = table.getViews().get(0);
                    rootView.setLastTransformation(lastTransformationTs);
                    solrFacade.updateViewEntity(rootView, false);
                }
            }

            metascopeTableRepository.save(table);
            solrFacade.updateTablePartial(table, true);
        } catch (Exception e) {
            LOG.warn("Could not retrieve table from metastore", e);
            continue;
        }

    }

    /* commit to index */
    solrFacade.commit();

    client.close();
    try {
        fs.close();
    } catch (IOException e) {
        LOG.warn("Could not close connection to HDFS", e);
    }

    LOG.info("Sync with metastore finished");
    return true;
}

From source file:es.emergya.bbdd.dao.ClienteConectadoHome.java

@Transactional(propagation = Propagation.REQUIRES_NEW, readOnly = false, rollbackFor = Throwable.class)
public void persist(ClienteConectado transientInstance) {
    log.debug("persisting ClienteConectado instance");
    try {
        getSession().persist(transientInstance);
        log.debug("persist successful");
    } catch (RuntimeException re) {
        log.error("persist failed", re);
        throw re;
    }
}
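Note the rollbackFor = Throwable.class attribute: by default Spring only rolls back on unchecked exceptions (RuntimeException and Error), so widening it to Throwable makes checked exceptions trigger a rollback of this new transaction as well.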

From source file:org.fuin.auction.query.server.AuctionQueryServiceImpl.java

@Override
@Transactional(propagation = Propagation.REQUIRES_NEW)
public final List<CategoryDto> findAllActiveCategories() {
    final List<Category> categories = categoryDao.findAllActive();
    final List<CategoryDto> result = new ArrayList<CategoryDto>();
    for (final Category category : categories) {
        // TODO michael Create domain object to dto converter structure
        result.add(new CategoryDto(category.getId(), category.getName(), category.isActive()));
    }
    return result;
}
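The TODO hints at factoring the inline mapping into a reusable converter. A minimal sketch of such a structure (the DtoConverter interface and CategoryDtoConverter class are hypothetical, not part of the source project):

public interface DtoConverter<D, T> {
    T convert(D domainObject);
}

public final class CategoryDtoConverter implements DtoConverter<Category, CategoryDto> {
    @Override
    public CategoryDto convert(final Category category) {
        return new CategoryDto(category.getId(), category.getName(), category.isActive());
    }
}

findAllActiveCategories() could then delegate to an injected converter instead of constructing the DTOs inline.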

From source file:com.orange.clara.cloud.servicedbdumper.task.job.JobFactory.java

@Transactional(propagation = Propagation.REQUIRES_NEW)
public Job createJobWithDatabaseRefSrc(JobType jobType, DatabaseRef databaseRefSrc,
        DbDumperServiceInstance dbDumperServiceInstance) {
    Job job = new Job(jobType, databaseRefSrc, dbDumperServiceInstance);
    if (this.jobRepo.findByJobTypeAndJobEventAndDatabaseRefSrc(jobType, JobEvent.START, databaseRefSrc)
            .size() > 0
            || this.jobRepo.findByJobTypeAndJobEventAndDatabaseRefSrc(jobType, JobEvent.RUNNING, databaseRefSrc)
                    .size() > 0) {
        job.setJobEvent(JobEvent.SCHEDULED);
        this.logger.info(String.format("Job type: %s for database source '%s' has been scheduled.",
                jobType.toString(), databaseRefSrc.getDatabaseName()));
    }
    return this.jobRepo.save(job);
}

From source file:fi.hsl.parkandride.back.LockDao.java

@Override
@Transactional(propagation = Propagation.REQUIRES_NEW, isolation = Isolation.SERIALIZABLE)
public boolean releaseLock(Lock lock) {
    validationService.validate(lock);
    if (ownerName.equals(lock.owner)) {
        return deleteLock(lock) == 1;
    } else {
        throw new LockException("Cannot release lock. Lock is not owned by this node.");
    }
}
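Combining REQUIRES_NEW with SERIALIZABLE isolation makes the lock release commit immediately and independently of any surrounding business transaction, so other nodes observe the freed lock right away. A usage sketch (acquireLock and doScheduledWork are hypothetical counterparts, not part of the shown DAO):

public void runExclusively(Lock lock) {
    if (!lockDao.acquireLock(lock)) { // hypothetical acquire counterpart
        return;                       // another node holds the lock
    }
    try {
        doScheduledWork();            // hypothetical business logic
    } finally {
        lockDao.releaseLock(lock);    // commits in its own serializable transaction
    }
}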

From source file:cs544.blog.service.BlogService.java

@Transactional(propagation = Propagation.REQUIRES_NEW)
@Override
public void deleteUser(long userId) {
    userDAO.deleteUser(userId);
}