Example usage for javax.ejb TransactionAttributeType NEVER

Introduction

This page collects usage examples for javax.ejb.TransactionAttributeType.NEVER.

Prototype

TransactionAttributeType NEVER

Document

The client is required to call without a transaction context; otherwise, the container throws an exception.
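
For example, a minimal stateless bean might look like the sketch below (the bean and method names are illustrative, not taken from the sources on this page). Per the EJB specification, a call that arrives with an active transaction context is rejected by the container with a javax.ejb.EJBException; a call made outside any transaction proceeds and runs non-transactionally.

import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;

@Stateless
public class ReportBean {

    // NEVER: the container rejects any invocation that arrives with an
    // active transaction context; calls made with no transaction proceed
    // and execute without one.
    @TransactionAttribute(TransactionAttributeType.NEVER)
    public void generateLargeReport() {
        // long-running work that must not hold a transaction open
    }
}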

Usage

From source file: org.rhq.enterprise.server.bundle.BundleManagerBean.java

@Override
@RequiredPermission(Permission.MANAGE_BUNDLE)
@TransactionAttribute(TransactionAttributeType.NEVER)
public BundleVersion createBundleVersionViaURL(Subject subject, String distributionFileUrl) throws Exception {

    // validate by immediately creating a URL
    URL url = new URL(distributionFileUrl);

    // get the distro file into a tmp dir
    // create temp file
    File tempDistributionFile = null;
    InputStream is = null;
    OutputStream os = null;
    BundleVersion bundleVersion = null;

    try {
        tempDistributionFile = File.createTempFile("bundle-distribution", ".zip");

        is = url.openStream();
        os = new FileOutputStream(tempDistributionFile);
        long len = StreamUtil.copy(is, os);
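        // StreamUtil.copy() closes both streams it is given, so the
        // references are nulled out here and the safeClose() calls in the
        // finally block become no-ops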
        is = null;
        os = null;
        log.debug("Copied [" + len + "] bytes from [" + distributionFileUrl + "] into ["
                + tempDistributionFile.getPath() + "]");

        bundleVersion = createBundleVersionViaFile(subject, tempDistributionFile);
    } finally {
        if (null != tempDistributionFile) {
            tempDistributionFile.delete();
        }
        safeClose(is);
        safeClose(os);
    }

    return bundleVersion;
}

From source file: org.rhq.enterprise.server.bundle.BundleManagerBean.java

@Override
@RequiredPermission(Permission.MANAGE_BUNDLE)
@TransactionAttribute(TransactionAttributeType.NEVER)
public void purgeBundleDestination(Subject subject, int bundleDestinationId) throws Exception {
    // find the live bundle deployment for this destination, and get all the resource deployments for that live deployment
    BundleDeploymentCriteria bdc = new BundleDeploymentCriteria();
    bdc.addFilterDestinationId(bundleDestinationId);
    bdc.addFilterIsLive(true);
    bdc.fetchBundleVersion(true);
    bdc.fetchResourceDeployments(true);
    bdc.fetchDestination(true);
    List<BundleDeployment> liveDeployments = bundleManager.findBundleDeploymentsByCriteria(subject, bdc);
    if (1 != liveDeployments.size()) {
        throw new IllegalArgumentException(
                "No live deployment to purge is found for destinationId [" + bundleDestinationId + "]");
    }
    BundleDeployment liveDeployment = liveDeployments.get(0);
    List<BundleResourceDeployment> resourceDeploys = liveDeployment.getResourceDeployments();
    if (resourceDeploys == null || resourceDeploys.isEmpty()) {
        return; // nothing to do
    }

    // we need to obtain the bundle type (the remote plugin container needs it). our first criteria can't fetch this deep, we have to do another query.
    BundleVersionCriteria bvc = new BundleVersionCriteria();
    bvc.addFilterId(liveDeployment.getBundleVersion().getId());
    bvc.fetchBundle(true); // will eagerly fetch the bundle type
    PageList<BundleVersion> bvs = bundleManager.findBundleVersionsByCriteria(subject, bvc);
    liveDeployment.setBundleVersion(bvs.get(0)); // wire up the full bundle version back into the live deployment
    // the bundle type doesn't eagerly load the resource type - the remote plugin container needs that too
    ResourceTypeCriteria rtc = new ResourceTypeCriteria();
    rtc.addFilterBundleTypeId(liveDeployment.getBundleVersion().getBundle().getBundleType().getId());
    PageList<ResourceType> rts = resourceTypeManager.findResourceTypesByCriteria(subject, rtc);
    liveDeployment.getBundleVersion().getBundle().getBundleType().setResourceType(rts.get(0));

    // we need to obtain the resources for all resource deployments - our first criteria can't fetch this deep, we have to do another query.
    List<Integer> resourceDeployIds = new ArrayList<Integer>();
    for (BundleResourceDeployment resourceDeploy : resourceDeploys) {
        resourceDeployIds.add(resourceDeploy.getId());
    }
    BundleResourceDeploymentCriteria brdc = new BundleResourceDeploymentCriteria();
    brdc.addFilterIds(resourceDeployIds.toArray(new Integer[resourceDeployIds.size()]));
    brdc.fetchResource(true);
    brdc.setPageControl(PageControl.getUnlimitedInstance());
    PageList<BundleResourceDeployment> brdResults = bundleManager
            .findBundleResourceDeploymentsByCriteria(subject, brdc);
    resourceDeploys.clear();
    resourceDeploys.addAll(brdResults);
    // need to wire the live bundle deployment back in - no need for another query or fetch it above because we have it already
    for (BundleResourceDeployment brd : brdResults) {
        brd.setBundleDeployment(liveDeployment);
    }

    // loop through each deployment and purge it on agent
    Map<BundleResourceDeployment, String> failedToPurge = new HashMap<BundleResourceDeployment, String>();
    for (BundleResourceDeployment resourceDeploy : resourceDeploys) {
        try {
            // first put the user name that requested the purge in the audit trail
            BundleResourceDeploymentHistory history = new BundleResourceDeploymentHistory(subject.getName(),
                    "Purge Requested", "User [" + subject.getName() + "] requested to purge this deployment",
                    null, BundleResourceDeploymentHistory.Status.SUCCESS, null, null);
            bundleManager.addBundleResourceDeploymentHistory(subject, resourceDeploy.getId(), history);

            // get a connection to the agent and tell it to purge the bundle from the file system
            Subject overlord = subjectManager.getOverlord();
            AgentClient agentClient = agentManager.getAgentClient(overlord,
                    resourceDeploy.getResource().getId());
            BundleAgentService bundleAgentService = agentClient.getBundleAgentService();
            BundlePurgeRequest request = new BundlePurgeRequest(resourceDeploy);
            BundlePurgeResponse results = bundleAgentService.purge(request);
            if (!results.isSuccess()) {
                String errorMessage = results.getErrorMessage();
                failedToPurge.put(resourceDeploy, errorMessage);
            }
        } catch (Exception e) {
            String errorMessage = ThrowableUtil.getStackAsString(e);
            failedToPurge.put(resourceDeploy, errorMessage);
        }
    }

    // marks the live deployment "no longer live"
    bundleManager._finalizePurge(subjectManager.getOverlord(), liveDeployment, failedToPurge);

    // throw an exception if we failed to purge one or more resource deployments.
    // since we are not in a tx context, we lose nothing. All DB updates have already been committed by now
    // which is what we want. All this does is inform the caller something went wrong.
    if (!failedToPurge.isEmpty()) {
        int totalDeployments = liveDeployment.getResourceDeployments().size();
        int failedPurges = failedToPurge.size();
        throw new Exception("Failed to purge [" + failedPurges + "] of [" + totalDeployments
                + "] remote resource deployments");
    }
    return;
}
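
Both BundleManagerBean examples above share a pattern worth noting: the NEVER method never touches the database directly, but delegates each unit of persistent work to other bean methods through injected EJB references (bundleManager, subjectManager, and so on), so every delegated call runs and commits in its own transaction while the outer method stays non-transactional. A minimal sketch of that shape, with hypothetical bean and method names:

import java.util.List;
import javax.ejb.EJB;
import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;

@Stateless
public class PurgeBean {

    // the bean's own EJB proxy; calls through it go back through the
    // container and therefore honor each method's transaction attribute
    @EJB
    private PurgeBean self;

    @TransactionAttribute(TransactionAttributeType.NEVER)
    public void purgeAll(List<Integer> ids) {
        for (int id : ids) {
            self.purgeOne(id); // each purge commits or rolls back on its own
        }
        // anything that succeeded is already committed; throwing here would
        // only inform the caller, it cannot undo committed work
    }

    @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
    public void purgeOne(int id) {
        // transactional database work for a single item
    }
}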

From source file: org.rhq.enterprise.server.cloud.StorageNodeManagerBean.java

@Override
@TransactionAttribute(TransactionAttributeType.NEVER)
public void linkResource(Resource resource) {
    Configuration pluginConfig = configurationManager.getPluginConfiguration(resource.getId());
    String address = pluginConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY);

    if (log.isInfoEnabled()) {
        log.info("Linking " + resource + " to storage node at " + address);
    }
    try {
        StorageNode storageNode = storageNodeManager.findStorageNodeByAddress(address);
        if (storageNode == null) {
            if (InetAddresses.isInetAddress(address)) {
                String hostName = InetAddresses.forString(address).getHostName();
                log.info("Did not find storage node with address [" + address + "]. Searching by hostname ["
                        + hostName + "]");
                storageNode = storageNodeManager.findStorageNodeByAddress(hostName);
            } else {
                String ipAddress = InetAddress.getByName(address).getHostAddress();
                log.info("Did not find storage node with address [" + address + "] Searching by IP address ["
                        + ipAddress + "]");
                storageNode = storageNodeManager.findStorageNodeByAddress(ipAddress);
            }
        }

        if (storageNode != null) {
            if (log.isInfoEnabled()) {
                log.info(storageNode + " is an existing storage node. No cluster maintenance is necessary.");
            }
            storageNode.setAddress(address);
            storageNode.setResource(resource);
            storageNode.setOperationMode(OperationMode.NORMAL);
            storageNodeManager.linkExistingStorageNodeToResource(storageNode);

        } else {
            StorageClusterSettings clusterSettings = storageClusterSettingsManager
                    .getClusterSettings(subjectManager.getOverlord());
            storageNode = storageNodeManager.createStorageNode(resource, clusterSettings);

            if (log.isInfoEnabled()) {
                log.info("Scheduling cluster maintenance to deploy " + storageNode
                        + " into the storage cluster...");
            }
            if (clusterSettings.getAutomaticDeployment()) {
                log.info("Deploying " + storageNode);
                storageNodeManager.deployStorageNode(subjectManager.getOverlord(), storageNode);
            } else {
                log.info("Automatic deployment is disabled. " + storageNode + " will not become part of the "
                        + "cluster until it is deployed.");
            }
        }
    } catch (UnknownHostException e) {
        throw new RuntimeException("Could not resolve address [" + address + "]. The resource " + resource
                + " cannot be linked to a storage node", e);
    }
}

From source file: org.rhq.enterprise.server.content.ContentSourceManagerBean.java

@TransactionAttribute(TransactionAttributeType.NEVER)
public boolean internalSynchronizeContentSource(int contentSourceId) throws Exception {
    ContentServerPluginContainer pc = ContentManagerHelper.getPluginContainer();
    ContentProviderManager contentProviderManager = pc.getAdapterManager();
    return contentProviderManager.synchronizeContentProvider(contentSourceId);
}

From source file: org.rhq.enterprise.server.measurement.MeasurementBaselineManagerBean.java

@TransactionAttribute(TransactionAttributeType.NEVER)
public void calculateAutoBaselines() {
    Properties conf = systemManager.getSystemConfiguration(subjectManager.getOverlord());

    // frequency is how often the baselines are recalculated
    // data set is how far back for a particular scheduled measurement is included in the baseline calcs
    // frequency of 3 days and data set of 10 days means "every 3 days, recalculate baselines automatically.
    // For each scheduled measurement, take their last 10 days worth of data and use that data set
    // as the portion that will be used to get the min/max/average".
    String baselineFrequencyString = conf.getProperty(RHQConstants.BaselineFrequency);
    String baselineDataSetString = conf.getProperty(RHQConstants.BaselineDataSet);

    log.debug("Found baseline defaults: " + "frequency=" + baselineFrequencyString + " dataset="
            + baselineDataSetString);

    // It's time to auto-calculate the baselines again.
    // Determine how much data we need to calculate baselines for by determining the oldest and youngest
    // measurement data to include in the calculations.
    long amountOfData = Long.parseLong(baselineDataSetString);
    long baselineFrequency = Long.parseLong(baselineFrequencyString);
    if (baselineFrequency == 0) {
        log.info(
                "Baseline frequency is set to 0 - not recomputing baselines. Go to Admin->System settings to change this.");
        return;
    }
    long baselinesOlderThanTime = System.currentTimeMillis() - baselineFrequency;

    measurementBaselineManager.calculateAutoBaselines(amountOfData, baselinesOlderThanTime);

    // everything was calculated successfully, remember this time
    conf = systemManager.getSystemConfiguration(subjectManager.getOverlord()); // reload the config in case it was changed since we started
    try {
        systemManager.setSystemConfiguration(subjectManager.getOverlord(), conf, true);
    } catch (Exception e) {
        log.error("Failed to remember the time when we just calc'ed baselines - it may recalculate again soon.",
                e);
    }
}

From source file: org.rhq.enterprise.server.measurement.MeasurementBaselineManagerBean.java

@TransactionAttribute(TransactionAttributeType.NEVER)
public long calculateAutoBaselines(long amountOfData, long baselinesOlderThanTime) {
    try {
        log.info("Calculating auto baselines");
        log.info("Deleting baselines computations older than " + new Date(baselinesOlderThanTime));
        log.info("Inserting new baselines using last " + (amountOfData / (24 * 60 * 60 * 1000L))
                + " days of 1H data");
        long now = System.currentTimeMillis();
        long computeTime = now;

        log.debug("computeTime = " + computeTime);

        int deleted = measurementBaselineManager._calculateAutoBaselinesDELETE(baselinesOlderThanTime);
        log.info("Removed [" + deleted + "] old baselines - they will now be recalculated ("
                + (System.currentTimeMillis() - now) + ")ms");

        now = System.currentTimeMillis();
        int totalInserted = 0;
        while (true) {
            /*
             * each call is done in a separate xtn of at most 100K inserted rows; this helps to keep the xtn
             * shorter to avoid timeouts in scenarios where baseline calculations bunch together. the idea was that
             * by basing a batch of baseline calculations off of the import time of the resource into inventory,
             * that the total work would naturally be staggered throughout the day. in practice, this didn't always
             * work as intended for one of several reasons:
             *
             *   1) all servers in the cloud were down for a few days (maybe a slow product upgrade, maybe a cold
             *      data center relocation)
             *   2) issues with running the job itself, if quartz had locking issues under severe load and somehow
             *      this job wasn't getting executed for a few hours / days
             *   3) the user tended to import all new resources / platforms at the same time of day, thus bypassing
             *      the implicit optimization of trying to stagger the calculations by resource commit time
             *
             * 2/18/2010 NOTE: Limits weren't / aren't actually achieving the effect we want.  The baseline query
             * follows the general form of "insert into...select from <big query> having <subquery> limit X".
             * In this case, the limit was reducing the number of rows inserted, but it was still taking the full
             * cost of calculating everything that should have been inserted.  The limit was intended as a cheap
             * method of chunking or partitioning the work, but wasn't properly chunking the expensive
             * part - the "big query".  What we actually want to do is come up with a strategy that lessens the
             * amount of data we need to select, thereby reducing the amount of time it takes to calculate the
             * insertion list.
             *
             * One proposed strategy for this would be to chunk on the scheduleId.  So if there were, say,
             * 5M scheduleIds in the systems, we might take 500K of them at a time and then execute the
             * baseline insertion job 10 times against a much smaller set of data each time.  But the
             * complication here is how to calculate precise groups of 500K schedules at a time, and then
             * walk that chunked list.
             *
             * Another strategy would be to divvy things up by resource type. Since a measurementSchedule is
             * linked to a measurementDefinition which is linked to a resourceType, we could very easily chunk
             * the insertion based off the schedules that belong to each resourceType.  This would create
             * one insert statement for each type of resource in the system.  The complication here, however,
             * is that you may have millions of resources of one type, but hardly any resources of another.
             * So there's still a chance that some insertions proceed slowly (in the worst case).
             *
             * In any event, an appropriate chunking solution needs to be found, and that partitioning strategy
             * needs to replace the limits in the query today.
             */
            int inserted = measurementBaselineManager._calculateAutoBaselinesINSERT(amountOfData);
            totalInserted += inserted;
            // since we're batching 100K inserts at a time, we're done if we didn't have that many to insert
            if (inserted < 100000) {
                break;
            }
        }
        log.info("Calculated and inserted [" + totalInserted + "] new baselines. ("
                + (System.currentTimeMillis() - now) + ")ms");

        MeasurementMonitor.getMBean()
                .incrementBaselineCalculationTime(System.currentTimeMillis() - computeTime);

        agentStatusManager.updateByAutoBaselineCalculationJob();

        return computeTime;
    } catch (Exception e) {
        log.error("Failed to auto-calculate baselines", e);
        throw new RuntimeException("Auto-calculation failure", e);
    }
}
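
The long comment in this example proposes chunking the insert by scheduleId rather than relying on the query's limit. A rough sketch of that idea is below; the min/max lookups and the overload of _calculateAutoBaselinesINSERT that accepts a scheduleId range are assumptions for illustration and do not exist in the real RHQ API:

private int insertBaselinesInScheduleIdChunks(long amountOfData) {
    final int CHUNK_SIZE = 500000; // schedules per batch, per the proposal above
    int totalInserted = 0;
    // hypothetical lookups; the real schedule manager has no such methods
    int minId = measurementScheduleManager.getMinScheduleId();
    int maxId = measurementScheduleManager.getMaxScheduleId();
    for (int lo = minId; lo <= maxId; lo += CHUNK_SIZE) {
        int hi = Math.min(lo + CHUNK_SIZE - 1, maxId);
        // the expensive "big query" now only scans schedules with ids in
        // [lo, hi], so each transaction stays short no matter how many
        // schedules exist in total (id ranges only approximate the 500K
        // target when ids are sparse)
        totalInserted += measurementBaselineManager._calculateAutoBaselinesINSERT(amountOfData, lo, hi);
    }
    return totalInserted;
}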

From source file: org.rhq.enterprise.server.measurement.MeasurementBaselineManagerBean.java

@TransactionAttribute(TransactionAttributeType.NEVER)
public MeasurementBaseline calculateAutoBaseline(Subject subject, Integer measurementScheduleId, long startDate,
        long endDate, boolean save) throws BaselineCreationException, MeasurementNotFoundException {

    MeasurementBaseline result = measurementBaselineManager.calculateAutoBaselineInNewTransaction(subject,
            measurementScheduleId, startDate, endDate, save);

    if (save) {
        // note, this executes in a new transaction so the baseline must already be committed to the database
        agentStatusManager.updateByMeasurementBaseline(result.getId());
    }

    return result;
}

From source file: org.rhq.enterprise.server.measurement.MeasurementBaselineManagerBean.java

@TransactionAttribute(TransactionAttributeType.NEVER)
public MeasurementBaseline calculateAutoBaseline(Subject subject, int groupId, int definitionId, long startDate,
        long endDate, boolean save) throws BaselineCreationException, MeasurementNotFoundException {

    MeasurementBaseline result = measurementBaselineManager.calculateAutoBaselineForGroupInNewTransaction(
            subject, groupId, definitionId, startDate, endDate, save);

    if (save) {
        // note, this executes in a new transaction so the baseline must already be committed to the database
        agentStatusManager.updateByMeasurementBaseline(result.getId());
    }

    return result;
}

From source file: org.rhq.enterprise.server.resource.metadata.ResourceMetadataManagerBean.java

@TransactionAttribute(TransactionAttributeType.NEVER)
public void removeObsoleteTypes(Subject subject, String pluginName, PluginMetadataManager metadataCache) {

    Set<ResourceType> obsoleteTypes = new HashSet<ResourceType>();
    Set<ResourceType> legitTypes = new HashSet<ResourceType>();

    try {
        resourceMetadataManager.getPluginTypes(subject, pluginName, legitTypes, obsoleteTypes, metadataCache);

        if (!obsoleteTypes.isEmpty()) {
            log.debug("Removing " + obsoleteTypes.size() + " obsolete types: " + obsoleteTypes + "...");
            removeResourceTypes(subject, obsoleteTypes, new HashSet<ResourceType>(obsoleteTypes));
        }

        // Now it's safe to remove any obsolete subcategories on the legit types.
        for (ResourceType legitType : legitTypes) {
            ResourceType updateType = metadataCache.getType(legitType.getName(), legitType.getPlugin());

            // If we've got a type from the descriptor which matches an existing one,
            // then let's see if we need to remove any subcategories from the existing one.

            // NOTE: I don't think updateType will ever be null here because we have previously verified
            // its existence above when we called resourceMetadataManager.getPluginTypes. All of the types contained
            // in legitTypes are all types found to exist in metadataCache. Therefore, I think that this null
            // check can be removed.
            //
            // jsanda - 11/11/2010
            if (updateType != null) {
                try {
                    resourceMetadataManager.removeObsoleteSubCategories(subject, updateType, legitType);
                } catch (Exception e) {
                    throw new Exception("Failed to delete obsolete subcategories from " + legitType + ".", e);
                }
            }
        }
    } catch (Exception e) {
        // Catch all exceptions, so a failure here does not cause the outer tx to rollback.
        log.error("Failure during removal of obsolete ResourceTypes and Subcategories.", e);
    }
}