List of usage examples for org.springframework.transaction.annotation.Propagation.NEVER
Propagation.NEVER
Propagation.NEVER tells Spring to execute the annotated method non-transactionally and to throw an IllegalTransactionStateException if the method is invoked through the Spring proxy while a transaction is already active. The examples below show how this setting is used in real projects; each entry names the source file it comes from.
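Before the project examples, here is a minimal sketch of the behavior every example relies on. The ReportService and ReportBatchJob names are illustrative and not taken from any project below; only the propagation semantics are Spring's documented behavior.

import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;

@Service
public class ReportService {

    // Runs non-transactionally. If this method is entered through the Spring proxy
    // while a transaction is already active, Spring throws
    // IllegalTransactionStateException before the method body executes.
    @Transactional(propagation = Propagation.NEVER)
    public void exportReport() {
        // long-running work that must not hold a database transaction
    }
}

@Service
class ReportBatchJob {

    private final ReportService reportService;

    ReportBatchJob(ReportService reportService) {
        this.reportService = reportService;
    }

    // Fails at runtime: the REQUIRED transaction opened here is still active
    // when the NEVER-annotated method is invoked on the other bean.
    @Transactional
    public void runInsideTransaction() {
        reportService.exportReport();
    }
}

Note that Spring applies the propagation check only when the call crosses the transactional proxy; a NEVER-annotated method invoked via this.exportReport() from inside the same bean bypasses the check entirely.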
From source file: app.core.TransactionTestHelper.java

@Transactional(propagation = Propagation.NEVER)
public void throwExceptionIfExistingTransaction() {
    // Intentionally empty: Spring itself throws IllegalTransactionStateException
    // if this method is entered through the proxy while a transaction is active.
}
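One plausible way to use such a helper is as a guard in integration tests that need real commits, for example tests that hand work to asynchronous workers. The sketch below is hypothetical: only TransactionTestHelper comes from the listing above, and the test class, runner setup, and test-context.xml location are assumptions.

import app.core.TransactionTestHelper;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

// Hypothetical test class; only TransactionTestHelper is taken from the example above.
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration("classpath:test-context.xml")   // assumed config location
public class NoTestTransactionIT {

    @Autowired
    private TransactionTestHelper transactionTestHelper;

    @Test
    public void runsOutsideAnySpringManagedTransaction() {
        // Throws IllegalTransactionStateException (failing the test) if this test
        // method is unexpectedly running inside a Spring-managed transaction.
        transactionTestHelper.throwExceptionIfExistingTransaction();

        // ... exercise code that requires real commits, e.g. polling async workers ...
    }
}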
From source file: org.ops4j.orient.spring.tx.object.TransactionalObjectService.java

@Transactional(propagation = Propagation.NEVER)
public void registerEntityClasses() {
    registerClass(Person.class);
}
From source file: io.cloudslang.engine.queue.services.recovery.ExecutionRecoveryServiceImpl.java

@Override
@Transactional(propagation = Propagation.NEVER)
public void doRecovery() {
    if (logger.isDebugEnabled()) {
        logger.debug("Begin recovery");
    }
    recoverWorkers();
    assignRecoveredMessages();
    if (logger.isDebugEnabled()) {
        logger.debug("End recovery");
    }
}
From source file: com.google.ie.business.dao.impl.TagDaoImpl.java

@Override
@Transactional(propagation = Propagation.NEVER)
public List<Tag> getTagsByKeys(Collection<String> keys) {
    Collection<Tag> tags = getJdoTemplate().find(Tag.class, ":keyList.contains(key)", null, keys, null);
    if (tags == null)
        return null;
    return new ArrayList<Tag>(tags);
}
From source file: edu.harvard.med.screensaver.io.libraries.WellsSdfDataExporter.java

@Transactional(readOnly = true, propagation = Propagation.NEVER)
/* avoid accumulating entity objects in the Hibernate session, for scalability */
public InputStream export(final Iterator<String> wellKeyStrs) throws IOException {
    WellSdfWriter writer = null;
    File outFile = null;
    try {
        outFile = File.createTempFile("wellsSdfDataExporter", "sdf");
        log.debug("creating temp file: " + outFile);
        outFile.deleteOnExit();
        FileWriter outWriter = new FileWriter(outFile);
        writer = new WellSdfWriter(new PrintWriter(outWriter));
        EntityDataFetcher<Well, String> dataFetcher = new EntityDataFetcher<Well, String>(Well.class, _dao);
        List<PropertyPath<Well>> relationships = Lists.newArrayList();
        relationships.add(Well.library.toFullEntity());
        relationships.add(Well.reagents.to(Reagent.publications).toFullEntity());
        RelationshipPath<Well> toReagentPath;
        if (getLibraryContentsVersion() == null) {
            toReagentPath = Well.latestReleasedReagent;
        } else {
            toReagentPath = Well.reagents.restrict("libraryContentsVersion", getLibraryContentsVersion());
        }
        relationships.add(toReagentPath.to(Reagent.libraryContentsVersion).toFullEntity());
        relationships.add(toReagentPath.to(SmallMoleculeReagent.compoundNames));
        relationships.add(toReagentPath.to(SmallMoleculeReagent.pubchemCids));
        relationships.add(toReagentPath.to(SmallMoleculeReagent.chembankIds));
        relationships.add(toReagentPath.to(SmallMoleculeReagent.chemblIds));
        relationships.add(toReagentPath.to(SmallMoleculeReagent.molfileList));
        relationships.add(toReagentPath.to(SilencingReagent.facilityGenes).to(Gene.genbankAccessionNumbers).toFullEntity());
        dataFetcher.setPropertiesToFetch(relationships);
        writeSDFileSearchResults(writer, Lists.newArrayList(wellKeyStrs), dataFetcher);
    } finally {
        IOUtils.closeQuietly(writer);
    }
    return new FileInputStream(outFile);
}
From source file: com.expedia.seiso.domain.service.impl.ItemServiceImpl.java

/**
 * Using {@link Propagation#NEVER} because we don't want a single error to wreck the entire operation.
 */
@Override
@Transactional(propagation = Propagation.NEVER)
public SaveAllResponse saveAll(@NonNull Class itemClass, @NonNull List<? extends Item> items, boolean mergeAssociations) {
    val numItems = items.size();
    val itemClassName = itemClass.getSimpleName();
    log.info("Batch saving {} items ({})", numItems, itemClass.getSimpleName());
    val errors = new ArrayList<SaveAllError>();
    for (val item : items) {
        try {
            // Have to doInTransaction() since calling save() happens behind the transactional proxy.
            // Also, see http://stackoverflow.com/questions/5568409/java-generics-void-void-types
            txTemplate.execute(new TransactionCallback<Void>() {
                @Override
                public Void doInTransaction(TransactionStatus status) {
                    save(item, mergeAssociations);
                    return null;
                }
            });
        } catch (RuntimeException e) {
            e.printStackTrace();
            val message = e.getClass() + ": " + e.getMessage();
            errors.add(new SaveAllError(item.itemKey(), message));
        }
    }
    val numErrors = errors.size();
    if (numErrors == 0) {
        log.info("Batch saved {} items ({}) with no errors", numItems, itemClassName);
    } else {
        log.warn("Batch saved {} items ({}) with {} errors: {}", numItems, itemClassName, numErrors, errors);
    }
    return new SaveAllResponse(numItems, numErrors, errors);
}
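The combination above, Propagation.NEVER on the batch method plus one programmatic transaction per item, is a general pattern for fault-isolated batch saving: each item commits or fails on its own, so one bad record cannot roll back the rest. Below is a minimal, self-contained sketch of the same idea; all class names (BatchImportService, Widget, WidgetRepository) are hypothetical, and only the pattern comes from the example above.

import java.util.ArrayList;
import java.util.List;

import org.springframework.stereotype.Service;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.support.TransactionTemplate;

@Service
public class BatchImportService {

    private final TransactionTemplate txTemplate;
    private final WidgetRepository widgetRepository;

    public BatchImportService(PlatformTransactionManager txManager, WidgetRepository widgetRepository) {
        this.txTemplate = new TransactionTemplate(txManager);
        this.widgetRepository = widgetRepository;
    }

    // NEVER: the loop itself must not run inside one big transaction, otherwise a
    // failure on item N would roll back items 1..N-1 as well.
    @Transactional(propagation = Propagation.NEVER)
    public List<String> importAll(List<Widget> widgets) {
        List<String> errors = new ArrayList<>();
        for (Widget widget : widgets) {
            try {
                // Each item is saved in its own short transaction and commits independently.
                txTemplate.execute(status -> {
                    widgetRepository.save(widget);
                    return null;
                });
            } catch (RuntimeException e) {
                errors.add(widget.getId() + ": " + e.getMessage());
            }
        }
        return errors;
    }
}

// Minimal hypothetical collaborators, included only to keep the sketch self-contained.
interface WidgetRepository {
    void save(Widget widget);
}

class Widget {
    private final String id;

    Widget(String id) {
        this.id = id;
    }

    String getId() {
        return id;
    }
}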
From source file: com.expedia.seiso.web.controller.v1.ItemControllerV1.java

@RequestMapping(value = "/{repoKey}", method = RequestMethod.POST, params = "mode=batch",
        consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
@Transactional(propagation = Propagation.NEVER)
public SaveAllResponse postAll(@PathVariable String repoKey, PEResources peResources) {
    log.trace("Batch saving {} items: repoKey={}", peResources.size(), repoKey);
    val itemClass = itemMetaLookup.getItemClass(repoKey);
    return delegate.postAll(itemClass, peResources, true);
}
From source file: com.vmware.bdd.manager.TestClusteringJobs.java

@Test(groups = { "TestClusteringJobs" })
@Transactional(propagation = Propagation.NEVER)
public void testCreateCluster() throws Exception {
    ClusterCreate createSpec = new ClusterCreate();
    createSpec.setName(TEST_CLUSTER_NAME);
    createSpec.setAppManager("Default");
    createSpec.setType(ClusterType.HDFS_MAPRED);
    createSpec.setNetworkConfig(createNetConfig(TEST_DHCP_NETWORK_NAME, dhcpPortgroup));
    createSpec.setDistro("bigtop");
    createSpec.setDistroVendor(Constants.DEFAULT_VENDOR);
    long jobExecutionId = clusterMgr.createCluster(createSpec, new BaseConfiguration());
    ClusterRead cluster = clusterMgr.getClusterByName(TEST_CLUSTER_NAME, false);
    Assert.assertTrue(cluster.getStatus() == ClusterStatus.PROVISIONING,
            "Cluster status should be PROVISIONING, but got " + cluster.getStatus());
    waitTaskFinished(jobExecutionId);
    cluster = clusterEntityMgr.findClusterWithNodes(TEST_CLUSTER_NAME, true);
    Assert.assertTrue(cluster.getInstanceNum() == 5,
            "Cluster instance number should be 5, but got " + cluster.getInstanceNum());
    Assert.assertTrue(cluster.getStatus() == ClusterStatus.RUNNING,
            "Cluster status should be RUNNING, but got " + cluster.getStatus());
    //checkIpRange(cluster);
    checkVcFolders(TEST_CLUSTER_NAME);
    checkVcResourePools(cluster, ConfigInfo.getSerengetiUUID() + "-" + TEST_CLUSTER_NAME);
    checkDiskLayout(cluster);
}
From source file: com.vmware.bdd.manager.TestClusteringJobs.java

@Test(groups = { "TestClusteringJobs" }, dependsOnMethods = { "testCreateCluster" })
@Transactional(propagation = Propagation.NEVER)
public void testCreateClusterFailed() throws Exception {
    ClusterCreate createSpec = ClusterSpecFactory.createDefaultSpec(ClusterType.HDFS_MAPRED,
            Constants.DEFAULT_VENDOR, null, null);
    createSpec.setAppManager("Default");
    createSpec.setName(TEST_DHCP_CLUSTER_NAME);
    createSpec.setNetworkConfig(createNetConfig(TEST_DHCP_NETWORK_NAME, dhcpPortgroup));
    createSpec.setDistro("bigtop");
    NodeGroupCreate worker = createSpec.getNodeGroup("worker");
    worker.setInstanceNum(1);
    long jobExecutionId = clusterMgr.createCluster(createSpec, new BaseConfiguration());
    ClusterRead cluster = clusterMgr.getClusterByName(TEST_DHCP_CLUSTER_NAME, false);
    Assert.assertTrue(cluster.getStatus() == ClusterStatus.PROVISIONING,
            "Cluster status should be PROVISIONING, but got " + cluster.getStatus());
    stopVmAfterStarted(vcRP + "/" + ConfigInfo.getSerengetiUUID() + "-" + TEST_DHCP_CLUSTER_NAME + "/" + "worker",
            TEST_DHCP_CLUSTER_NAME + "-worker-0", jobExecutionId);
    waitTaskFinished(jobExecutionId);
    cluster = clusterMgr.getClusterByName(TEST_DHCP_CLUSTER_NAME, false);
    Assert.assertTrue(cluster.getInstanceNum() == 3,
            "Cluster instance number should be 3, but got " + cluster.getInstanceNum());
    Assert.assertTrue(cluster.getStatus() == ClusterStatus.PROVISION_ERROR,
            "Cluster status should be PROVISION_ERROR, but got " + cluster.getStatus());
}