Example usage for org.springframework.transaction.annotation Propagation NOT_SUPPORTED

List of usage examples for org.springframework.transaction.annotation Propagation NOT_SUPPORTED

Introduction

In this page you can find the example usage for org.springframework.transaction.annotation Propagation NOT_SUPPORTED.

Prototype

Propagation NOT_SUPPORTED

To view the source code for org.springframework.transaction.annotation Propagation NOT_SUPPORTED, use the Source Link below.

Click Source Link

Document

Execute non-transactionally, suspend the current transaction if one exists.

Usage

From source file:org.finra.herd.service.impl.UploadDownloadHelperServiceImpl.java

/**
 * Deletes the upload's source file from S3 by delegating to {@code deleteSourceFileFromS3Impl}.
 * Declared with {@code Propagation.NOT_SUPPORTED} so the delegate executes non-transactionally,
 * suspending the caller's transaction if one exists — the S3 call should not keep a database
 * transaction open for its duration.
 *
 * @param completeUploadSingleParamsDto DTO identifying the uploaded file to delete
 *        — presumably carries the S3 bucket/key; confirm against the impl method
 */
@Override
@Transactional(propagation = Propagation.NOT_SUPPORTED)
public void deleteSourceFileFromS3(CompleteUploadSingleParamsDto completeUploadSingleParamsDto) {
    deleteSourceFileFromS3Impl(completeUploadSingleParamsDto);
}

From source file:org.finra.herd.service.impl.UploadDownloadServiceImpl.java

/**
 * Processes a "complete upload single" message by delegating to
 * {@code performCompleteUploadSingleMessageImpl}. Declared with
 * {@code Propagation.NOT_SUPPORTED} so the work executes non-transactionally,
 * suspending the caller's transaction if one exists.
 *
 * @param objectKey the S3 object key from the message — NOTE(review): exact semantics
 *        depend on the impl method, which is not visible here
 * @return the result produced by the impl method
 */
@Override
@Transactional(propagation = Propagation.NOT_SUPPORTED)
public CompleteUploadSingleMessageResult performCompleteUploadSingleMessage(String objectKey) {
    return performCompleteUploadSingleMessageImpl(objectKey);
}

From source file:org.finra.herd.service.JobServiceTest.java

/**
 * Verifies that a RUNNING job whose process tree fans out into 800 sub-processes can be
 * deleted, and that the delete reason is recorded on the historic process instance.
 * Declared with {@code Propagation.NOT_SUPPORTED} so the test body runs outside any
 * test-managed transaction — NOTE(review): presumably so the engine's own commits are
 * visible while the job executes; confirm against the test base class.
 */
@Test
@Transactional(propagation = Propagation.NOT_SUPPORTED)
public void testDeleteJobActiveJobWithMultipleSubProcesses() throws Exception {
    // Create and persist a test job definition.
    executeJdbcTestHelper.prepareHerdDatabaseForExecuteJdbcWithReceiveTaskTest(TEST_ACTIVITI_NAMESPACE_CD,
            TEST_ACTIVITI_JOB_NAME, ACTIVITI_XML_TEST_MULTIPLE_SUB_PROCESSES);

    try {
        // Get the job definition entity and ensure it exists.
        JobDefinitionEntity jobDefinitionEntity = jobDefinitionDao
                .getJobDefinitionByAltKey(TEST_ACTIVITI_NAMESPACE_CD, TEST_ACTIVITI_JOB_NAME);
        assertNotNull(jobDefinitionEntity);

        // Get the process definition id.
        String processDefinitionId = jobDefinitionEntity.getActivitiId();

        // Build the parameters map.
        Map<String, Object> parameters = new HashMap<>();
        parameters.put("counter", 0);

        // Start the job.
        ProcessInstance processInstance = activitiService
                .startProcessInstanceByProcessDefinitionId(processDefinitionId, parameters);
        assertNotNull(processInstance);

        // Get the process instance id for this job.
        String processInstanceId = processInstance.getProcessInstanceId();

        // Wait for all processes to become active - we expect to have the main process along with 800 sub-processes.
        waitUntilActiveProcessesThreshold(processDefinitionId, 801);

        // Get the job and validate that it is RUNNING.
        Job getJobResponse = jobService.getJob(processInstanceId, true);
        assertNotNull(getJobResponse);
        assertEquals(JobStatusEnum.RUNNING, getJobResponse.getStatus());

        // Delete the job and validate the response.
        Job deleteJobResponse = jobService.deleteJob(processInstanceId,
                new JobDeleteRequest(ACTIVITI_JOB_DELETE_REASON));
        assertEquals(JobStatusEnum.COMPLETED, deleteJobResponse.getStatus());
        assertEquals(ACTIVITI_JOB_DELETE_REASON, deleteJobResponse.getDeleteReason());

        // Validate the historic process instance.
        HistoricProcessInstance historicProcessInstance = activitiHistoryService
                .createHistoricProcessInstanceQuery().processInstanceId(processInstanceId).singleResult();
        assertNotNull(historicProcessInstance);
        assertEquals(ACTIVITI_JOB_DELETE_REASON, historicProcessInstance.getDeleteReason());
    } finally {
        // Clean up the Herd database.
        executeJdbcTestHelper.cleanUpHerdDatabaseAfterExecuteJdbcWithReceiveTaskTest(TEST_ACTIVITI_NAMESPACE_CD,
                TEST_ACTIVITI_JOB_NAME);

        // Clean up the Activiti.
        deleteActivitiDeployments();
    }
}

From source file:org.finra.herd.service.JobServiceTest.java

/**
 * Verifies that a job with 800 sub-processes can still be deleted after it has been
 * SUSPENDED, and that the delete reason is recorded on the historic process instance.
 * Declared with {@code Propagation.NOT_SUPPORTED} so the test body runs outside any
 * test-managed transaction — NOTE(review): presumably so the engine's own commits are
 * visible while the job executes; confirm against the test base class.
 */
@Test
@Transactional(propagation = Propagation.NOT_SUPPORTED)
public void testDeleteJobSuspendedJobWithMultipleSubProcesses() throws Exception {
    // Create and persist a test job definition.
    executeJdbcTestHelper.prepareHerdDatabaseForExecuteJdbcWithReceiveTaskTest(TEST_ACTIVITI_NAMESPACE_CD,
            TEST_ACTIVITI_JOB_NAME, ACTIVITI_XML_TEST_MULTIPLE_SUB_PROCESSES);

    try {
        // Get the job definition entity and ensure it exists.
        JobDefinitionEntity jobDefinitionEntity = jobDefinitionDao
                .getJobDefinitionByAltKey(TEST_ACTIVITI_NAMESPACE_CD, TEST_ACTIVITI_JOB_NAME);
        assertNotNull(jobDefinitionEntity);

        // Get the process definition id.
        String processDefinitionId = jobDefinitionEntity.getActivitiId();

        // Build the parameters map.
        Map<String, Object> parameters = new HashMap<>();
        parameters.put("counter", 0);

        // Start the job.
        ProcessInstance processInstance = activitiService
                .startProcessInstanceByProcessDefinitionId(processDefinitionId, parameters);
        assertNotNull(processInstance);

        // Get the process instance id for this job.
        String processInstanceId = processInstance.getProcessInstanceId();

        // Wait for all processes to become active - we expect to have the main process along with 800 sub-processes.
        waitUntilActiveProcessesThreshold(processDefinitionId, 801);

        // Get the job and validate that it is RUNNING.
        Job getJobResponse = jobService.getJob(processInstanceId, true);
        assertNotNull(getJobResponse);
        assertEquals(JobStatusEnum.RUNNING, getJobResponse.getStatus());

        // Suspend the job.
        jobService.updateJob(processInstanceId, new JobUpdateRequest(JobActionEnum.SUSPEND));

        // Get the job again and validate that it is now SUSPENDED.
        getJobResponse = jobService.getJob(processInstanceId, true);
        assertNotNull(getJobResponse);
        assertEquals(JobStatusEnum.SUSPENDED, getJobResponse.getStatus());

        // Delete the job in suspended state and validate the response.
        Job deleteJobResponse = jobService.deleteJob(processInstanceId,
                new JobDeleteRequest(ACTIVITI_JOB_DELETE_REASON));
        assertEquals(JobStatusEnum.COMPLETED, deleteJobResponse.getStatus());
        assertEquals(ACTIVITI_JOB_DELETE_REASON, deleteJobResponse.getDeleteReason());

        // Validate the historic process instance.
        HistoricProcessInstance historicProcessInstance = activitiHistoryService
                .createHistoricProcessInstanceQuery().processInstanceId(processInstanceId).singleResult();
        assertNotNull(historicProcessInstance);
        assertEquals(ACTIVITI_JOB_DELETE_REASON, historicProcessInstance.getDeleteReason());
    } finally {
        // Clean up the Herd database.
        executeJdbcTestHelper.cleanUpHerdDatabaseAfterExecuteJdbcWithReceiveTaskTest(TEST_ACTIVITI_NAMESPACE_CD,
                TEST_ACTIVITI_JOB_NAME);

        // Clean up the Activiti.
        deleteActivitiDeployments();
    }
}

From source file:org.hyperic.hq.authz.server.session.ResourceManagerImpl.java

/**
 * Removes the specified resource by nulling out its resourceType. Will not
 * null the resourceType of the resource which is passed in. These resources
 * need to be cleaned up eventually by
 * {@link AppdefBossImpl.removeDeletedResources}. This may be done in the
 * background via zevent by issuing a {@link ResourcesCleanupZevent}.
 * @see {@link AppdefBossImpl.removeDeletedResources}
 * @see {@link ResourcesCleanupZevent}
 * @param r {@link Resource} resource to be removed.
 * @param nullResourceType tells the method to null out the resourceType
 * @param removeAllVirtual tells the method to remove all resources, including
 *        associated platforms, under the virtual resource hierarchy
 * @return AppdefEntityID[] - an array of the resources (including children)
 *         deleted
 * TODO suspending transaction was a performance enhancement from previous releases (i.e. 4.3)
 * It makes integration testing difficult, but we need to live w/it for now  until we can
 * re-visit the whole asynch deletion concept.  Makes a HUGE difference in performance.
 */
@Transactional(propagation = Propagation.NOT_SUPPORTED)
public AppdefEntityID[] removeResourceAndRelatedResources(AuthzSubject subj, Resource r,
        boolean nullResourceType, boolean removeAllVirtual) throws VetoException, PermissionException {
    final ResourceType resourceType = r.getResourceType();

    // Possible this resource has already been marked for deletion
    if (resourceType == null) {
        return new AppdefEntityID[0];
    }

    // Make sure user has permission to remove this resource
    final PermissionManager pm = PermissionManagerFactory.getInstance();
    String opName = null;

    // Map the resource's type to the corresponding "remove" operation name for the permission check.
    if (resourceType.getId().equals(AuthzConstants.authzPlatform)) {
        opName = AuthzConstants.platformOpRemovePlatform;
    } else if (resourceType.getId().equals(AuthzConstants.authzServer)) {
        opName = AuthzConstants.serverOpRemoveServer;
    } else if (resourceType.getId().equals(AuthzConstants.authzService)) {
        opName = AuthzConstants.serviceOpRemoveService;
    } else if (resourceType.getId().equals(AuthzConstants.authzApplication)) {
        opName = AuthzConstants.appOpRemoveApplication;
    } else if (resourceType.getId().equals(AuthzConstants.authzGroup)) {
        opName = AuthzConstants.groupOpRemoveResourceGroup;
    }

    final boolean debug = log.isDebugEnabled();
    final StopWatch watch = new StopWatch();
    if (debug)
        watch.markTimeBegin("removeResourceAndRelatedResources.pmCheck");
    // Throws PermissionException if the subject may not remove this resource.
    pm.check(subj.getId(), resourceType, r.getInstanceId(), opName);
    if (debug) {
        watch.markTimeEnd("removeResourceAndRelatedResources.pmCheck");
    }

    ResourceEdgeDAO edgeDao = resourceEdgeDAO;
    if (debug) {
        watch.markTimeBegin("removeResourceAndRelatedResources.findEdges");
    }
    // Collect both containment and virtual descendants before removing anything.
    Collection<ResourceEdge> edges = edgeDao.findDescendantEdges(r, getContainmentRelation());
    Collection<ResourceEdge> virtEdges = edgeDao.findDescendantEdges(r, getVirtualRelation());
    if (debug) {
        watch.markTimeEnd("removeResourceAndRelatedResources.findEdges");
    }
    Set<AppdefEntityID> removed = new HashSet<AppdefEntityID>();
    for (ResourceEdge edge : edges) {
        // Remove descendants' permissions
        // (recursive call: descendants always get nullResourceType=true).
        removed.addAll(
                Arrays.asList(removeResourceAndRelatedResources(subj, edge.getTo(), true, removeAllVirtual)));
    }

    for (ResourceEdge edge : virtEdges) {
        Resource prototype = edge.getTo().getPrototype();

        if (!removeAllVirtual
                && (prototype == null || !AuthzConstants.VMWARE_PROTOTYPES.contains(prototype.getName()))) {
            // do not remove the associated resources,
            // but remove the virtual resource edges
            resourceRemover.removeEdges(edge.getTo(), getVirtualRelation());
        } else {
            resourceRemover.removeResource(subj, edge.getTo(), true);
        }
    }

    removed.add(AppdefUtil.newAppdefEntityId(r));
    if (debug) {
        watch.markTimeBegin("removeResource");
    }
    // The root resource is removed last, honoring the caller's nullResourceType flag.
    resourceRemover.removeResource(subj, r, nullResourceType);
    if (debug) {
        watch.markTimeEnd("removeResource");
        log.debug(watch);
    }
    return removed.toArray(new AppdefEntityID[0]);
}

From source file:org.hyperic.hq.measurement.server.session.MeasurementProcessorImpl.java

/**
 * Schedules all enabled measurements for the given resources on the given agent,
 * in batches of 10000 entity ids. Availability measurements that are disabled due
 * to scheduled downtime are merged in as well, so "new" agents still receive them.
 * Runs non-transactionally (caller's transaction, if any, is suspended) and read-only.
 *
 * @param agent the agent to schedule the measurements on
 * @param eids List&lt;AppdefEntityID&gt; — the resources whose measurements are scheduled
 * @throws MonitorAgentException NOTE(review): presumably when communication with the
 *         agent fails; confirm against scheduleMeasurements
 */
@Transactional(propagation = Propagation.NOT_SUPPORTED, readOnly = true)
public void scheduleEnabled(Agent agent, Collection<AppdefEntityID> eids) throws MonitorAgentException {
    final StopWatch watch = new StopWatch();
    final boolean debug = log.isDebugEnabled();
    if (debug)
        watch.markTimeBegin("findEnabledMeasurements");
    // resourceId -> enabled measurements for that resource.
    Map<Integer, List<Measurement>> measMap = measurementManager.findEnabledMeasurements(eids);
    if (debug)
        watch.markTimeEnd("findEnabledMeasurements");

    // availability measurements in scheduled downtime are disabled, but not unscheduled.
    // need to schedule these measurements for "new" agents.
    if (debug)
        watch.markTimeBegin("getAvailMeasurementsInDowntime");
    Map<Integer, Measurement> downtimeMeasMap = availManager.getAvailMeasurementsInDowntime(eids);
    if (debug)
        watch.markTimeEnd("getAvailMeasurementsInDowntime");

    // Merge the downtime availability measurements into the per-resource lists.
    for (Map.Entry<Integer, Measurement> entry : downtimeMeasMap.entrySet()) {
        Integer resourceId = entry.getKey();
        List<Measurement> measurements = measMap.get(resourceId);
        if (measurements == null) {
            measurements = new ArrayList<Measurement>();
            measMap.put(resourceId, measurements);
        }
        measurements.add(entry.getValue());
    }

    // Schedule in fixed-size batches to bound the size of each agent call.
    final int batchSize = 10000;
    final List<AppdefEntityID> aeids = new ArrayList<AppdefEntityID>(eids);
    for (int i = 0; i < aeids.size(); i += batchSize) {
        final int end = Math.min(i + batchSize, aeids.size());
        if (debug)
            watch.markTimeBegin("scheduleMeasurements[" + end + "]");
        scheduleMeasurements(agent, measMap, aeids.subList(i, end));
        if (debug)
            watch.markTimeEnd("scheduleMeasurements[" + end + "]");
    }
    if (debug) {
        log.debug("scheduleEnabled: " + watch + ", { Size: [appdefEntity=" + eids.size()
                + "] [availMeasurementsInDowntime=" + downtimeMeasMap.size() + "] }");
    }
}

From source file:org.nuclos.server.masterdata.ejb3.MasterDataFacadeBean.java

/**
 * Validate all masterdata entries against their meta information (length,
 * format, min, max etc.). The transaction type is "not supported" here in
 * order to avoid a transaction timeout, as the whole operation may take some
 * time./*from w  ww . ja v  a  2  s.c  om*/
 *
 * @param sOutputFileName the name of the csv file to which the results are
 *           written.
 */
@Transactional(propagation = Propagation.NOT_SUPPORTED, noRollbackFor = { Exception.class })
@RolesAllowed("UseManagementConsole")
public void checkMasterDataValues(String sOutputFileName) {
    final PrintStream ps;
    try {
        ps = new PrintStream(new BufferedOutputStream(new FileOutputStream(sOutputFileName)), true);
    } catch (FileNotFoundException ex) {
        throw new NuclosFatalException(
                StringUtils.getParameterizedExceptionMessage("masterdata.error.missing.file", sOutputFileName),
                ex);
    }

    ps.println("Entit\u00e4t; ID; Fehlermeldung");
    for (MasterDataMetaVO mdmcvo : MasterDataMetaCache.getInstance().getAllMetaData()) {
        final String sEntityName = mdmcvo.getEntityName();
        try {
            for (MasterDataVO mdvo : helper.getGenericMasterData(sEntityName, null, true)) {
                try {
                    // validate each record
                    mdvo.validate(mdmcvo);
                } catch (CommonValidationException ex) {
                    final StringBuilder sbResult = new StringBuilder();
                    sbResult.append(sEntityName);
                    sbResult.append(";");
                    sbResult.append(mdvo.getId());
                    sbResult.append(";");
                    sbResult.append(ex.getMessage());
                    ps.println(sbResult.toString());
                }
            }
        } catch (Exception e) {
            LOG.error("checkMasterDataValues failed: " + e, e);
            error("Error while validating entity " + sEntityName);
        }
    }
    if (ps != null) {
        ps.close();
    }
    if (ps != null && ps.checkError()) {
        throw new NuclosFatalException("Failed to close PrintStream.");
    }
}