List of usage examples for org.springframework.transaction.annotation.Propagation.REQUIRES_NEW
Propagation REQUIRES_NEW
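Propagation.REQUIRES_NEW always runs the annotated method in its own physical transaction: any transaction already active in the caller is suspended for the duration of the call and resumed afterwards, so the inner work commits or rolls back independently of the outer transaction. Below is a minimal sketch of that pattern; AuditTrailService, recordEvent, and the audit_event table are hypothetical names used only for illustration and do not come from the examples that follow.

import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;

@Service
public class AuditTrailService {

    private final JdbcTemplate jdbcTemplate;

    public AuditTrailService(JdbcTemplate jdbcTemplate) {
        this.jdbcTemplate = jdbcTemplate;
    }

    // Runs in its own physical transaction: the caller's transaction (if any) is
    // suspended, so this row commits even if the caller later rolls back.
    @Transactional(propagation = Propagation.REQUIRES_NEW)
    public void recordEvent(String principal, String action) {
        // "audit_event" is a hypothetical table used only for illustration
        jdbcTemplate.update("INSERT INTO audit_event (principal, action) VALUES (?, ?)", principal, action);
    }
}

Note that the new transaction boundary only applies when the method is invoked through the Spring proxy; calling recordEvent from another method of the same bean bypasses the proxy and therefore the REQUIRES_NEW behavior. The real-world examples below use the same attribute for DAO queries, repository tests, audit logging, and synchronization tasks.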
From source file:es.emergya.bbdd.dao.StreetHome.java
@SuppressWarnings("unchecked") @Transactional(readOnly = true, rollbackFor = Throwable.class, propagation = Propagation.REQUIRES_NEW) public Collection<Street> getAllMatching(String expression) { Collection<Street> res = new LinkedList<Street>(); Session currentSession = getSession(); currentSession.flush();//from w w w. j a v a 2 s.com Criteria criteria = currentSession.createCriteria(Street.class) .add(Restrictions.ilike("nombreviaine", expression)).addOrder(Order.asc("nombreviaine")) .setResultTransformer(Criteria.DISTINCT_ROOT_ENTITY); res = criteria.list(); return res; }
From source file:be.peerassistedlearning.repository.StudentRepositoryTest.java
@Test
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void testUpdate() {
    Student s1 = new Student("David", "paswoord", "davidopdebeeck@hotmail.com", Curriculum.TI, "david", UserType.ADMIN);
    repository.save(s1);
    s1.setName("Cedric");
    repository.save(s1);
    Student s2 = repository.findOne(s1.getId());
    assertEquals(s2.getName(), "Cedric");
}
From source file:es.emergya.bbdd.dao.BandejaEntradaHome.java
@SuppressWarnings("unchecked") @Transactional(propagation = Propagation.REQUIRES_NEW, readOnly = true, rollbackFor = Throwable.class) public List<Inbox> getNotProcessed() { Session currentSession = getSession(); currentSession.clear();//from w w w . j a v a 2 s.c om return (List<Inbox>) getSession().createCriteria(Inbox.class).add(Restrictions.eq("procesado", false)) .addOrder(Order.asc("marcaTemporal")).setResultTransformer(Criteria.DISTINCT_ROOT_ENTITY).list(); }
From source file:org.schedoscope.metascope.task.metastore.MetastoreTask.java
@Override
@Transactional(propagation = Propagation.REQUIRES_NEW)
public boolean run(RawJDBCSqlRepository sqlRepository, long start) {
    LOG.info("Sync repository with metastore");
    metastoreClient.init();
    FileSystem fs;
    try {
        Configuration hadoopConfig = new Configuration();
        hadoopConfig.set("fs.defaultFS", config.getHdfs());
        fs = FileSystem.get(hadoopConfig);
    } catch (IOException e) {
        LOG.info("[MetastoreSyncTask] FAILED: Could not connect to HDFS", e);
        metastoreClient.close();
        return false;
    }
    Connection connection;
    try {
        connection = dataSource.getConnection();
    } catch (SQLException e) {
        LOG.error("Could not retrieve database connection.", e);
        return false;
    }
    LOG.info("Connected to metastore (" + config.getMetastoreThriftUri() + ")");
    List<MetascopeTable> allTables = sqlRepository.findAllTables(connection);
    for (MetascopeTable table : allTables) {
        LOG.info("Get metastore information for table " + table.getFqdn());
        try {
            MetastoreTable mTable = metastoreClient.getTable(table.getDatabaseName(), table.getTableName());
            if (mTable == null) {
                LOG.error("Could not retrieve table from metastore.");
                continue;
            }
            table.setTableOwner(mTable.getOwner());
            table.setCreatedAt(mTable.getCreateTime() * 1000L);
            table.setInputFormat(mTable.getInputFormat());
            table.setOutputFormat(mTable.getOutputFormat());
            table.setDataPath(mTable.getLocation());
            try {
                table.setDataSize(getDirectorySize(fs, table.getDataPath()));
                table.setPermissions(getPermission(fs, table.getDataPath()));
            } catch (IllegalArgumentException e) {
                LOG.warn("Could not retrieve dir size: " + e.getMessage());
                LOG.debug("ERROR: Could not read HDFS metadata", e);
            }
            long maxLastTransformation = -1;
            List<String> partitionNames = metastoreClient.listPartitionNames(table.getDatabaseName(),
                    table.getTableName(), (short) -1);
            List<MetascopeView> views = sqlRepository.findViews(connection, table.getFqdn());
            List<List<String>> groupedPartitions = metastoreClient.partitionLists(partitionNames, 10000);
            for (List<String> groupedPartitionNames : groupedPartitions) {
                List<MetastorePartition> partitions = metastoreClient.listPartitions(table.getDatabaseName(),
                        table.getTableName(), groupedPartitionNames);
                List<MetascopeView> changedViews = new ArrayList<>();
                for (MetastorePartition partition : partitions) {
                    MetascopeView view = getView(views, partition);
                    if (view == null) {
                        // a view which is not registered as a partition in hive metastore should not exists ...
                        continue;
                    }
                    view.setTable(table);
                    String numRows = partition.getNumRows();
                    if (numRows != null && !numRows.toUpperCase().equals("NULL") && !numRows.isEmpty()) {
                        view.setNumRows(Long.parseLong(numRows));
                    }
                    String totalSize = partition.getTotalSize();
                    if (totalSize != null && !totalSize.toUpperCase().equals("NULL") && !totalSize.isEmpty()) {
                        view.setTotalSize(Long.parseLong(totalSize));
                    }
                    String lastTransformation = partition.getSchedoscopeTimestamp();
                    if (lastTransformation != null && !lastTransformation.toUpperCase().equals("NULL")
                            && !lastTransformation.isEmpty()) {
                        long ts = Long.parseLong(lastTransformation);
                        view.setLastTransformation(ts);
                        if (ts > maxLastTransformation) {
                            maxLastTransformation = ts;
                        }
                    }
                    solrFacade.updateViewEntity(view, false);
                    changedViews.add(view);
                }
                sqlRepository.insertOrUpdateViewMetadata(connection, changedViews);
                solrFacade.commit();
            }
            if (maxLastTransformation != -1) {
                table.setLastTransformation(maxLastTransformation);
            } else {
                String ts = mTable.getSchedoscopeTimestamp(); //mTable.getParameters().get(SCHEDOSCOPE_TRANSFORMATION_TIMESTAMP);
                if (ts != null) {
                    long lastTransformationTs = Long.parseLong(ts);
                    table.setLastTransformation(lastTransformationTs);
                    MetascopeView rootView = views.get(0);
                    rootView.setTable(table);
                    rootView.setLastTransformation(lastTransformationTs);
                    solrFacade.updateViewEntity(rootView, false);
                }
            }
            sqlRepository.saveTable(connection, table);
            solrFacade.updateTableMetastoreData(table, true);
        } catch (Exception e) {
            LOG.warn("Could not retrieve table from metastore", e);
            continue;
        }
    }
    /* commit to index */
    solrFacade.commit();
    metastoreClient.close();
    try {
        fs.close();
    } catch (IOException e) {
        LOG.warn("Could not close connection to HDFS", e);
    }
    try {
        connection.close();
    } catch (SQLException e) {
        LOG.error("Could not close connection", e);
    }
    LOG.info("Sync with metastore finished");
    return true;
}
From source file:architecture.common.i18n.impl.DefaultI18nTextManager.java
@Transactional(readOnly = false, propagation = Propagation.REQUIRES_NEW)
public void saveTexts(List<I18nText> list, String categoryName) {
    for (I18nText text : list) {
        if (text.getTextId() == -1L) {
            text.setCategoryName(categoryName);
        }
    }
    saveTexts(list);
}
From source file:fi.helsinki.opintoni.repository.CustomAuditEventRepository.java
@Bean
public AuditEventRepository auditEventRepository() {
    return new AuditEventRepository() {

        @Inject
        private AuditEventConverter auditEventConverter;

        @Override
        public List<AuditEvent> find(String principal, Date after) {
            Iterable<PersistentAuditEvent> persistentAuditEvents;
            if (principal == null && after == null) {
                persistentAuditEvents = persistenceAuditEventRepository.findAll();
            } else if (after == null) {
                persistentAuditEvents = persistenceAuditEventRepository.findByPrincipal(principal);
            } else {
                persistentAuditEvents = persistenceAuditEventRepository
                        .findByPrincipalAndAuditEventDateAfter(principal, new LocalDateTime(after));
            }
            return auditEventConverter.convertToAuditEvent(persistentAuditEvents);
        }

        @Override
        @Transactional(propagation = Propagation.REQUIRES_NEW)
        public void add(AuditEvent event) {
            PersistentAuditEvent persistentAuditEvent = new PersistentAuditEvent();
            persistentAuditEvent.setPrincipal(event.getPrincipal());
            persistentAuditEvent.setAuditEventType(event.getType());
            persistentAuditEvent.setAuditEventDate(new LocalDateTime(event.getTimestamp()));
            persistentAuditEvent.setData(auditEventConverter.convertDataToStrings(event.getData()));
            persistenceAuditEventRepository.save(persistentAuditEvent);
        }
    };
}
From source file:org.brekka.pegasus.core.services.impl.AllocationServiceImpl.java
@Override
@Transactional(propagation = Propagation.REQUIRES_NEW, isolation = Isolation.REPEATABLE_READ)
public void incrementDownloadCounter(final AllocationFile allocationFile) {
    FileType xml = allocationFile.getXml();
    int maxDownloads = Integer.MAX_VALUE;
    if (xml.isSetMaxDownloads()) {
        maxDownloads = xml.getMaxDownloads();
    }
    AllocationFile managed = this.allocationFileDAO.retrieveById(allocationFile.getId());
    int downloadCount = managed.getDownloadCount();
    // Increment the downloads
    downloadCount++;
    managed.setDownloadCount(downloadCount);
    if (downloadCount == maxDownloads) {
        // Mark this file for deletion
        managed.setExpires(new Date());
    }
    this.allocationFileDAO.update(managed);
}
From source file:cs544.blog.service.BlogService.java
@Transactional(propagation = Propagation.REQUIRES_NEW)
@Override
public List<User> getAllUser() {
    List<User> users = userDAO.getUserList();
    // tx.commit();
    return users;
}
From source file:org.schedoscope.metascope.task.SchedoscopeTask.java
@Override
@Transactional(propagation = Propagation.REQUIRES_NEW)
public boolean run(RawJDBCSqlRepository sqlRepository, long start) {
    Map<String, MetascopeTable> cachedTables = new HashMap<>();
    Map<String, MetascopeField> cachedFields = new HashMap<>();
    Map<String, MetascopeView> cachedViews = new HashMap<>();
    List<MetascopeView> viewsToPersist = new ArrayList<>();
    List<Dependency> tableDependencies = new ArrayList<>();
    List<Dependency> viewDependencies = new ArrayList<>();
    List<Dependency> fieldDependencies = new ArrayList<>();

    LOG.info("Retrieve and parse data from schedoscope instance \"" + schedoscopeInstance.getId() + "\"");

    Connection connection;
    try {
        connection = dataSource.getConnection();
    } catch (SQLException e) {
        LOG.error("Could not retrieve database connection.", e);
        return false;
    }

    /** get data from schedoscope */
    ViewStatus viewStatus;
    try {
        viewStatus = SchedoscopeUtil.getViewStatus(true, false, null, schedoscopeInstance.getHost(),
                schedoscopeInstance.getPort());
    } catch (SchedoscopeConnectException e) {
        LOG.error("Could not retrieve view information", e);
        return false;
    }

    if (viewStatus == null) {
        LOG.info("[SchedoscopeSyncTask] FAILED: Schedoscope status information is not available");
        return false;
    }

    if (viewStatus.getViews().size() == 0) {
        LOG.info("[SchedoscopeSyncTask] No schedoscope metadata available. Maybe materialize some views?");
        sqlRepository.saveMetadata(connection, "schedoscopeTimestamp", String.valueOf(System.currentTimeMillis()));
        return false;
    }

    int size = viewStatus.getViews().size();
    LOG.info("Received " + size + " views");

    /** save tables to avoid foreign key constraint violation */
    int tableCount = 0;
    for (View view : viewStatus.getViews()) {
        if (view.isTable() && !view.isExternal()) {
            String fqdn = view.getDatabase() + "." + view.getTableName();
            MetascopeTable table = sqlRepository.findTable(connection, fqdn);
            if (table == null) {
                table = new MetascopeTable();
                table.setFqdn(fqdn);
            }
            cachedTables.put(fqdn, table);
            tableCount++;
        }
    }
    LOG.info("Received " + tableCount + " tables");

    for (View view : viewStatus.getViews()) {
        if (view.isTable() && !view.isExternal()) {
            String fqdn = view.getDatabase() + "." + view.getTableName();
            LOG.info("Consuming table " + fqdn);
            MetascopeTable table = cachedTables.get(fqdn);
            table.setSchedoscopeId(schedoscopeInstance.getId());
            table.setDatabaseName(view.getDatabase());
            table.setTableName(view.getTableName());
            table.setViewPath(view.viewPath());
            table.setExternalTable(view.isExternal());
            table.setTableDescription(view.getComment());
            table.setStorageFormat(view.getStorageFormat());
            table.setMaterializeOnce(view.isMaterializeOnce());
            for (ViewField field : view.getFields()) {
                if (field.getName().equals(OCCURRED_AT)) {
                    table.setTimestampField(OCCURRED_AT);
                    table.setTimestampFieldFormat(SCHEDOSCOPE_TIMESTAMP_FORMAT);
                    break;
                } else if (field.getName().equals(OCCURRED_UNTIL)) {
                    table.setTimestampField(OCCURRED_UNTIL);
                    table.setTimestampFieldFormat(SCHEDOSCOPE_TIMESTAMP_FORMAT);
                    break;
                }
            }

            /** fields */
            Set<MetascopeField> tableFields = new HashSet<>();
            int i = 0;
            for (ViewField viewField : view.getFields()) {
                String fieldFqdn = fqdn + "." + viewField.getName();
                MetascopeField field = sqlRepository.findField(connection, fieldFqdn);
                if (field == null) {
                    field = new MetascopeField();
                    field.setFieldId(fieldFqdn);
                    field.setTableFqdn(fqdn);
                }
                field.setFieldName(viewField.getName());
                field.setFieldType(viewField.getFieldtype());
                field.setFieldOrder(i++);
                field.setParameter(false);
                field.setDescription(viewField.getComment());
                //lineage
                if (view.getLineage() != null && view.getLineage().get(fieldFqdn) != null) {
                    for (String dependencyField : view.getLineage().get(fieldFqdn)) {
                        if (!dependencyField.equals(fieldFqdn)) {
                            MetascopeField dField = cachedFields.get(dependencyField);
                            if (dField == null) {
                                dField = new MetascopeField();
                                dField.setFieldId(dependencyField);
                                cachedFields.put(dependencyField, dField);
                            }
                            fieldDependencies.add(new Dependency(field.getFieldId(), dField.getFieldId()));
                        }
                    }
                }
                tableFields.add(field);
                cachedFields.put(field.getFieldId(), field);
            }
            table.setFields(tableFields);
            sqlRepository.saveFields(connection, table.getFields(), table.getFqdn(), false);

            /** parameter */
            Set<MetascopeField> tableParameter = new HashSet<>();
            i = 0;
            for (ViewField viewField : view.getParameters()) {
                String parameterFqdn = fqdn + "." + viewField.getName();
                MetascopeField parameter = sqlRepository.findField(connection, parameterFqdn);
                if (parameter == null) {
                    parameter = new MetascopeField();
                    parameter.setFieldId(parameterFqdn);
                    parameter.setTableFqdn(fqdn);
                }
                parameter.setFieldName(viewField.getName());
                parameter.setFieldType(viewField.getFieldtype());
                parameter.setFieldOrder(i++);
                parameter.setParameter(true);
                parameter.setDescription(viewField.getComment());
                parameter.setTable(table);
                tableParameter.add(parameter);
            }
            table.setParameters(tableParameter);
            sqlRepository.saveFields(connection, table.getParameters(), table.getFqdn(), true);

            /** exports */
            List<MetascopeExport> tableExports = new ArrayList<>();
            i = 0;
            if (view.getExport() != null) {
                for (ViewTransformation viewExport : view.getExport()) {
                    String exportFqdn = fqdn + "." + viewExport.getName() + "_" + i;
                    MetascopeExport export = sqlRepository.findExport(connection, exportFqdn);
                    if (export == null) {
                        export = new MetascopeExport();
                        export.setExportId(exportFqdn);
                        export.setTableFqdn(fqdn);
                    }
                    export.setExportType(viewExport.getName());
                    export.setProperties(viewExport.getProperties());
                    export.setTable(table);
                    tableExports.add(export);
                    i++;
                }
            }
            table.setExports(tableExports);
            sqlRepository.saveExports(connection, table.getExports(), table.getFqdn());

            /** transformation */
            MetascopeTransformation metascopeTransformation = new MetascopeTransformation();
            metascopeTransformation.setTransformationId(fqdn + "." + view.getTransformation().getName());
            metascopeTransformation.setTransformationType(view.getTransformation().getName());
            metascopeTransformation.setProperties(view.getTransformation().getProperties());
            table.setTransformation(metascopeTransformation);
            sqlRepository.saveTransformation(connection, table.getTransformation(), table.getFqdn());

            /** views and dependencies */
            LOG.info("Getting views for table " + fqdn);
            List<View> views = getViewsForTable(table.getViewPath(), viewStatus.getViews());
            LOG.info("Found " + views.size() + " views");
            for (View partition : views) {
                MetascopeView metascopeView = cachedViews.get(partition.getName());
                if (metascopeView == null) {
                    metascopeView = new MetascopeView();
                    metascopeView.setViewUrl(partition.getName());
                    metascopeView.setViewId(partition.getName());
                    cachedViews.put(partition.getName(), metascopeView);
                }
                viewsToPersist.add(metascopeView);
                if (table.getParameters() != null && table.getParameters().size() > 0) {
                    String parameterString = getParameterString(partition.getName(), table);
                    metascopeView.setParameterString(parameterString);
                }
                for (List<String> dependencyLists : partition.getDependencies().values()) {
                    for (String dependency : dependencyLists) {
                        MetascopeView dependencyView = cachedViews.get(dependency);
                        if (dependencyView == null) {
                            dependencyView = new MetascopeView();
                            dependencyView.setViewUrl(dependency);
                            dependencyView.setViewId(dependency);
                            cachedViews.put(dependency, dependencyView);
                        }
                        metascopeView.addToDependencies(dependencyView);
                        dependencyView.addToSuccessors(metascopeView);
                        viewDependencies.add(new Dependency(metascopeView.getViewId(), dependencyView.getViewId()));
                    }
                }
                for (String dependency : partition.getDependencies().keySet()) {
                    tableDependencies.add(new Dependency(fqdn, dependency));
                }
                cachedViews.put(partition.getName(), metascopeView);
                metascopeView.setTable(table);
            }
            LOG.info("Processed all views for table " + fqdn);
            table.setViewsSize(views.size());
            sqlRepository.saveTable(connection, table);
            LOG.info("Finished processing table " + fqdn);
        }
    }

    try {
        LOG.info("Saving field dependency information (" + fieldDependencies.size() + ") ...");
        sqlRepository.insertFieldDependencies(connection, cachedTables.values(), fieldDependencies);
        LOG.info("Saving table dependency information (" + tableDependencies.size() + ") ...");
        sqlRepository.insertTableDependencies(connection, cachedTables.values(), tableDependencies);
        LOG.info("Saving views (" + viewsToPersist.size() + ")...");
        sqlRepository.insertOrUpdateViews(connection, viewsToPersist);
        LOG.info("Saving view dependency information (" + viewDependencies.size() + ") ...");
        sqlRepository.insertViewDependencies(connection, viewDependencies);
    } catch (Exception e) {
        LOG.error("Error writing to database", e);
    }

    LOG.info("Saving to index");
    for (MetascopeTable table : cachedTables.values()) {
        solrFacade.updateTablePartial(table, false);
    }
    solrFacade.commit();
    LOG.info("Finished index update");

    sqlRepository.saveMetadata(connection, "schedoscopeTimestamp", String.valueOf(System.currentTimeMillis()));

    try {
        connection.close();
    } catch (SQLException e) {
        LOG.error("Could not close connection", e);
    }

    LOG.info("Finished sync with schedoscope instance \"" + schedoscopeInstance.getId() + "\"");
    return true;
}
From source file:es.emergya.bbdd.dao.RolHome.java
@Transactional(propagation = Propagation.REQUIRES_NEW, readOnly = true, rollbackFor = Throwable.class)
public Integer getTotal() {
    Integer res = new Integer(-1);
    Session currentSession = getSession();
    currentSession.clear();
    Criteria criteria = currentSession.createCriteria(Rol.class).setProjection(Projections.rowCount());
    Integer count = (Integer) criteria.uniqueResult();
    res = count.intValue();
    return res;
}