List of usage examples for javax.transaction.UserTransaction.commit()
void commit() throws RollbackException, HeuristicMixedException, HeuristicRollbackException, SecurityException, IllegalStateException, SystemException;
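Before the framework-specific examples below, here is a minimal sketch of the canonical begin/work/commit pattern. It assumes a Java EE container that publishes the transaction under the standard JNDI name java:comp/UserTransaction; the doBusinessLogic() method is a hypothetical placeholder for whatever transactional work (JDBC, JMS, etc.) the caller performs.

import javax.naming.InitialContext;
import javax.transaction.Status;
import javax.transaction.UserTransaction;

public class UserTransactionCommitSketch {

    public void runInTransaction() throws Exception {
        // java:comp/UserTransaction is the standard JNDI name in Java EE containers
        UserTransaction tx = (UserTransaction) new InitialContext().lookup("java:comp/UserTransaction");
        tx.begin();
        try {
            doBusinessLogic(); // hypothetical placeholder for transactional work
            // commit() may throw RollbackException, HeuristicMixedException,
            // HeuristicRollbackException or SystemException
            tx.commit();
        } finally {
            // If commit was never reached, or failed before completing, the
            // transaction is still active and must be rolled back
            if (tx.getStatus() == Status.STATUS_ACTIVE) {
                tx.rollback();
            }
        }
    }

    private void doBusinessLogic() {
        // transactional work goes here
    }
}

The status check in the finally block mirrors the guard used by several of the examples below, which roll back only when getStatus() still reports Status.STATUS_ACTIVE.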
From source file:de.fme.topx.component.TopXUpdateComponent.java
/**
 * Increases the hit count for the given NodeRef by using the aspect
 * <code>topx:countable</code>. Does not fire events for other behaviours.
 * Uses the admin user to increment, because not every user has write permission.
 *
 * @param nodeRef
 * @param userName
 *            current user who reads or updates the document.
 * @param counterProperty
 * @param counterDateProperty
 * @param counterUserProperty
 * @throws SystemException
 * @throws NotSupportedException
 * @throws HeuristicRollbackException
 * @throws HeuristicMixedException
 * @throws RollbackException
 * @throws IllegalStateException
 * @throws SecurityException
 */
@SuppressWarnings("unchecked")
public Integer increaseHitcount(final NodeRef nodeRef, final String userName, final QName counterProperty,
        final QName counterDateProperty, final QName counterUserProperty)
        throws NotSupportedException, SystemException, SecurityException, IllegalStateException,
        RollbackException, HeuristicMixedException, HeuristicRollbackException {
    UserTransaction transaction = transactionService.getNonPropagatingUserTransaction(false);
    transaction.begin();
    try {
        Preconditions.checkNotNull(nodeRef, "Passed noderef should not be null");
        Preconditions.checkArgument(nodeService.exists(nodeRef),
                "Node[" + nodeRef + "] must exist in the repository");
        filter.disableAllBehaviours();
        Map<QName, Serializable> newProperties = Maps.newHashMap();
        Integer counter = (Integer) nodeService.getProperty(nodeRef, counterProperty);
        if (counter == null) {
            counter = setHitCountProperties(nodeRef, counterProperty, counterDateProperty, counterUserProperty,
                    newProperties, 1, userName);
        } else {
            boolean shouldCount = true;
            Map<QName, Serializable> properties = nodeService.getProperties(nodeRef);
            Serializable usersValue = properties.get(counterUserProperty);
            List<String> users;
            if (!(usersValue instanceof List)) {
                users = Lists.newArrayList((String) usersValue);
            } else {
                users = (List<String>) usersValue;
            }
            if (users != null) {
                int userIndex = users.indexOf(userName);
                if (userIndex != -1) {
                    List<Date> counterDates = (List<Date>) properties.get(counterDateProperty);
                    Date lastUserReadDate = counterDates.get(userIndex);
                    // only count one download for a document of a user per day
                    if (DateUtils.isSameDay(lastUserReadDate, new Date())) {
                        shouldCount = false;
                        LOG.info("User " + userName + " already downloads/updates document " + nodeRef
                                + " today. Skip counting.");
                    }
                }
            }
            if (shouldCount) {
                counter = setHitCountProperties(nodeRef, counterProperty, counterDateProperty,
                        counterUserProperty, newProperties, counter, userName);
            }
        }
        transaction.commit();
        LOG.info("Committing transaction for Node " + nodeRef);
        return counter;
    } finally {
        filter.enableAllBehaviours();
        if (transaction.getStatus() == javax.transaction.Status.STATUS_ACTIVE) {
            transaction.rollback();
            LOG.warn("Had to rollback the transaction for Node " + nodeRef);
        }
    }
}
From source file:it.doqui.index.ecmengine.business.job.move.MoveAggregationJob.java
private MoveAggregation getPropertiesFromAspect(NodeRef sourceNodeRef)
        throws DictionaryRuntimeException, NotSupportedException, SystemException, SecurityException,
        IllegalStateException, RollbackException, HeuristicMixedException, HeuristicRollbackException {
    logger.debug("[MoveAggregationJob::getPropertiesFromAspect] BEGIN");
    MoveAggregation aggreg = null;
    try {
        UserTransaction userTxSource = transactionService.getNonPropagatingUserTransaction();
        userTxSource.begin();
        QName destinationAspect = resolvePrefixNameToQName("ecm-sys:destination");
        if (nodeService.hasAspect(sourceNodeRef, destinationAspect)) {
            Map<QName, Serializable> nodeProp = nodeService.getProperties(sourceNodeRef);
            QName idNodeDestinationProp = resolvePrefixNameToQName("ecm-sys:idNodeDestination");
            QName repoDestinationProp = resolvePrefixNameToQName("ecm-sys:repoDestination");
            QName idNodeSourceProp = resolvePrefixNameToQName("ecm-sys:idNodeSource");
            QName repoSourceProp = resolvePrefixNameToQName("ecm-sys:repoSource");
            aggreg = new MoveAggregation();
            aggreg.setIdDestinationParent((String) nodeProp.get(idNodeDestinationProp));
            aggreg.setDestinationRepository((String) nodeProp.get(repoDestinationProp));
            aggreg.setIdSourceNode((String) nodeProp.get(idNodeSourceProp));
            aggreg.setSourceRepository((String) nodeProp.get(repoSourceProp));
        }
        userTxSource.commit();
    } finally {
        logger.debug("[MoveAggregationJob::getPropertiesFromAspect] END");
    }
    return aggreg;
}
From source file:it.doqui.index.ecmengine.business.job.move.MoveAggregationJob.java
private void moveCrossRepo(String sourceRepository, String idSourceNode, String destinationRepository,
        String idDestinationParent, NodeRef sourceNodeRef)
        throws NotSupportedException, SystemException, NodeRuntimeException, PermissionRuntimeException,
        AuthenticationRuntimeException, DictionaryRuntimeException, SecurityException, IllegalStateException,
        RollbackException, HeuristicMixedException, HeuristicRollbackException {
    logger.debug("[MoveAggregationJob::moveCrossRepo] BEGIN");
    Node result = null;
    UserTransaction userTxSource = null;
    UserTransaction userTxDest = null;
    String logCtx = "S: " + idSourceNode + " - SourceRepo: " + sourceRepository + " - D: "
            + idDestinationParent + " - DestRepo: " + destinationRepository;
    try {
        logger.debug("[MoveAggregationJob::moveCrossRepo] " + logCtx);
        userTxSource = transactionService.getNonPropagatingUserTransaction();
        userTxSource.begin();
        RepositoryManager.setCurrentRepository(sourceRepository);
        // authenticate as the system user
        authenticationComponent.setSystemUserAsCurrentUser();
        ChildAssociationRef sourceParentRef = nodeService.getPrimaryParent(sourceNodeRef);
        logger.debug("[MoveAggregationJob::moveCrossRepo] Nodo Source Padre : "
                + sourceParentRef.getParentRef().getId());
        QName destinationQName = sourceParentRef.getQName();
        QName destinationAssocTypeQName = sourceParentRef.getTypeQName();
        userTxSource.commit();

        userTxDest = transactionService.getNonPropagatingUserTransaction();
        userTxDest.begin();
        RepositoryManager.setCurrentRepository(destinationRepository);
        // authenticate as the system user
        authenticationComponent.setSystemUserAsCurrentUser();
        StoreRef spacesStoreDest = new StoreRef(StoreRef.PROTOCOL_WORKSPACE, "SpacesStore");
        NodeRef destinationParentRef = new NodeRef(spacesStoreDest, idDestinationParent);
        boolean exist = nodeService.exists(destinationParentRef);
        logger.debug("[MoveAggregationJob::moveCrossRepo] Nodo Destination Padre: "
                + destinationParentRef.getId() + " esiste? " + exist);
        userTxDest.commit();

        // copyAggregation
        userTxSource = transactionService.getNonPropagatingUserTransaction();
        userTxSource.begin();
        Map<NodeRef, NodeRef> copiedChildren = new HashMap<NodeRef, NodeRef>();
        RepositoryManager.setCurrentRepository(sourceRepository);
        // authenticate as the system user
        authenticationComponent.setSystemUserAsCurrentUser();
        NodeRef parentRef = sourceParentRef.getParentRef();
        userTxSource.commit();

        boolean copyChildren = true;
        NodeRef destinationNodeRef = null;
        // recursiveCopy recreates in the destination repository the same structure as in the
        // source: it actually creates only the primary children, while secondary children and
        // the target nodes of plain associations are not created in the destination; instead,
        // a relationship is created towards the original node in the source repository.
        // TODO: remove, or avoid creating, relationships from the secondary repository
        // towards nodes of the primary one.
        logger.debug("[MoveAggregationJob::moveCrossRepo] Inizio metodo ricorsivo : 'recursiveCopy'");
        destinationNodeRef = recursiveCopy(sourceNodeRef, parentRef, destinationParentRef,
                destinationAssocTypeQName, destinationQName, copyChildren, copiedChildren, sourceRepository,
                destinationRepository);
        logger.debug("[MoveAggregationJob::moveCrossRepo] Fine metodo ricorsivo : 'recursiveCopy'");
        dumpElapsed("MoveAggregationJob", "moveCrossRepo", logCtx, "Nodo Copia creato.");
        if (destinationNodeRef != null) {
            result = new Node(destinationNodeRef.getId(), destinationRepository);
            logger.debug("[MoveAggregationJob::moveCrossRepo] Uid Nodo Copia creato: " + result.getUid());
        }

        userTxDest = transactionService.getNonPropagatingUserTransaction();
        userTxDest.begin();
        // From the parent node moved to the destination repository, remove the
        // 'state' and 'destination' aspects
        RepositoryManager.setCurrentRepository(destinationRepository);
        // authenticate as the system user
        authenticationComponent.setSystemUserAsCurrentUser();
        QName stateAspect = resolvePrefixNameToQName("ecm-sys:state");
        nodeService.removeAspect(destinationNodeRef, stateAspect);
        dumpElapsed("MoveAggregationJob", "moveCrossRepo", logCtx, "Rimosso Aspect 'state' dal Nodo spostato.");
        logger.debug("[MoveAggregationJob::moveCrossRepo] Rimosso Aspect 'state' dal nodo : "
                + destinationNodeRef.getId());
        QName destinationAspect = resolvePrefixNameToQName("ecm-sys:destination");
        nodeService.removeAspect(destinationNodeRef, destinationAspect);
        dumpElapsed("MoveAggregationJob", "moveCrossRepo", logCtx,
                "Rimosso Aspect 'destination' dal Nodo spostato.");
        logger.debug("[MoveAggregationJob::moveCrossRepo] Rimosso Aspect 'destination' dal nodo : "
                + destinationNodeRef.getId());
        userTxDest.commit();

        // BEGIN DISABLE AGGREGATION
        userTxSource = transactionService.getNonPropagatingUserTransaction();
        userTxSource.begin();
        // Delete the child nodes of the source node
        RepositoryManager.setCurrentRepository(sourceRepository);
        // authenticate as the system user
        authenticationComponent.setSystemUserAsCurrentUser();
        List<ChildAssociationRef> childAssociations = nodeService.getChildAssocs(sourceNodeRef);
        int size = childAssociations != null ? childAssociations.size() : 0;
        logger.debug("[MoveAggregationJob::moveCrossRepo] Cancello " + size + " nodi/o figli.");
        if (size > 0) {
            for (ChildAssociationRef childAssoc : childAssociations) {
                if (childAssoc != null) {
                    nodeService.removeChildAssociation(childAssoc);
                    logger.debug("[MoveAggregationJob::moveCrossRepo] Associazione child eliminata.");
                    dumpElapsed("MoveAggregationJob", "moveCrossRepo", logCtx, "Associazione child eliminata.");
                }
            }
        }
        // ecm-sys:ecmengineSystemModel, aspect ecm-sys:state;
        // <property name="ecm-sys:stato"> is a property of that aspect
        QName stateProp = resolvePrefixNameToQName("ecm-sys:stato");
        String valoreStatoNodo = "spostato";
        // Set the ecm-sys:stato property of the ecm-sys:state aspect on the
        // source node to "spostato" (moved)
        nodeService.setProperty(sourceNodeRef, stateProp, valoreStatoNodo);
        dumpElapsed("MoveAggregationJob", "moveCrossRepo", logCtx,
                "Modificata property 'stato' dell'Aspect 'state'");
        // TODO: the aggregation should actually be deleted entirely, not disabled by adding
        // an aspect; it should be deleted only after the move job has completed successfully.
        logger.debug("[MoveAggregationJob::moveCrossRepo] Modificata property 'stato' dell'Aspect 'state' del nodo : "
                + sourceNodeRef.getId());
        // Remove the 'destination' aspect from the source node in the source repository
        nodeService.removeAspect(sourceNodeRef, destinationAspect);
        dumpElapsed("MoveAggregationJob", "moveCrossRepo", logCtx, "Rimosso Aspect 'destination' dal Nodo.");
        logger.debug("[MoveAggregationJob::moveCrossRepo] Rimosso Aspect 'destination' dal nodo : "
                + sourceNodeRef.getId());
        // END DISABLE AGGREGATION
        userTxSource.commit();

        // audit insertion
        insertAudit("MoveAggregationJob", "moveCrossRepo", logCtx, result.getUid(),
                "Source: " + sourceNodeRef.getId() + " RepoSource: " + sourceRepository + " -- Dest Parent: "
                        + destinationParentRef.getId() + " RepoDest: " + destinationRepository);
    } finally {
        logger.debug("[MoveAggregationJob::moveCrossRepo] END");
    }
}
From source file:fr.openwide.talendalfresco.rest.server.command.LoginCommand.java
private User authenticate(String username, String password, String ticket) {
    // case of existing session user : getting alfresco ticket
    User existingSessionUser = null;
    HttpSession session = httpRequest.getSession(false);
    if (session != null) {
        existingSessionUser = (User) session.getAttribute(AuthenticationHelper.AUTHENTICATION_USER);
        if (existingSessionUser != null) {
            String existingSessionTicket = existingSessionUser.getTicket();
            // alternatives :
            // 1. using alfresco ticket rather than sso ticket to speed up things
            // NB. this means that before logging in a different user an explicit logout must be done
            // 2. using sso ticket rather than alfresco one
            // this requires never to give the ticket but when we want to relog, which is bothersome
            if (existingSessionTicket != null) {
                ticket = existingSessionTicket;
            }
        }
    }

    UserTransaction tx = null;
    try {
        // Authenticate via the authentication service, then save the details of user in an object
        // in the session - this is used by the servlet filter etc. on each page to check for login
        if (username != null && password != null) {
            // authentication using login (alfresco or sso), since user/pwd params
            // (even empty ones) have been supplied
            // validation :
            RestServerHelper.validateUsername(session, username);
            RestServerHelper.validatePassword(session, password);
            // login :
            authenticationService.authenticate(username, password.toCharArray());
        } else if (ticket != null && ticket.length() != 0) {
            // authentication using ticket (alfresco or sso), since non empty ticket has been supplied
            authenticationService.validate(ticket);
        } else {
            xmlResult.setError(RestCommandResult.CODE_ERROR_AUTH_MISSING,
                    RestServerHelper.getMessage(session, RestServerHelper.MSG_ERROR_MISSING) + " : " + username,
                    null);
            return null;
        }

        // Set the user name as stored by the back end
        username = authenticationService.getCurrentUserName();

        if (existingSessionUser != null && existingSessionUser.getUserName().equals(username)) {
            // user was already logged in, nothing else to do
            return existingSessionUser;
        }

        // now setting up logged in user elements
        // using non propagated tx because already inside a tx (commandServlet)
        tx = transactionService.getNonPropagatingUserTransaction();
        tx.begin();

        // remove the session invalidated flag (used to remove last username cookie by AuthenticationFilter)
        if (session != null) {
            session.removeAttribute(AuthenticationHelper.SESSION_INVALIDATED);
        }

        // setup User object and Home space ID
        User user = new User(username, authenticationService.getCurrentTicket(),
                personService.getPerson(username));
        NodeRef homeSpaceRef = (NodeRef) nodeService.getProperty(personService.getPerson(username),
                ContentModel.PROP_HOMEFOLDER);
        // check that the home space node exists - else user cannot login
        if (nodeService.exists(homeSpaceRef) == false) {
            throw new InvalidNodeRefException(homeSpaceRef);
        }
        user.setHomeSpaceId(homeSpaceRef.getId());

        tx.commit();
        tx = null; // clear this so we know not to rollback

        // put the User object in the Session - the authentication servlet will then allow
        // the app to continue without redirecting to the login page
        if (session == null) {
            session = httpRequest.getSession(true); // creating session if none yet
        }
        session.setAttribute(AuthenticationHelper.AUTHENTICATION_USER, user);

        // Set the current locale for Alfresco web app. NB. session exists now.
        I18NUtil.setLocale(Application.getLanguage(session, true));

        return user;
    } catch (AuthenticationException ae) {
        xmlResult.setError(RestCommandResult.CODE_ERROR_AUTH_UNKNOWN_USER,
                RestServerHelper.getMessage(session, RestServerHelper.MSG_ERROR_UNKNOWN_USER) + " : "
                        + username, ae);
    } catch (InvalidNodeRefException inre) {
        xmlResult.setError(RestCommandResult.CODE_ERROR_AUTH_UNKNOWN_USER,
                RestServerHelper.getMessage(session, Repository.ERROR_NOHOME) + " : "
                        + inre.getNodeRef().getId() + " (" + username + ")", inre);
    } catch (Throwable e) {
        // Some other kind of serious failure
        xmlResult.setError("Unknown technical error when authenticating user " + username, null);
    } finally {
        try {
            if (tx != null) {
                tx.rollback();
            }
        } catch (Exception tex) {
        }
    }
    return null;
}
From source file:it.doqui.index.ecmengine.business.personalization.importer.ArchiveImporterJob.java
private int handleRootFolder(File folder, NodeRef parentNodeRef, QName parentAssocTypeQName,
        QName containerTypeQName, QName containerNamePropertyQName, QName containerAssocTypeQName,
        QName contentTypeQName, QName contentNamePropertyQName) throws Exception {
    logger.debug("[ArchiveImporterJob::handleRootFolder] BEGIN");

    // Count how many items have been written
    int nContent = 0;
    try {
        // Start by creating the individual contents
        boolean bContent = false;
        {
            // Get a UserTransaction object
            UserTransaction transaction = transactionService.getNonPropagatingUserTransaction();
            try {
                // Begin the transaction
                transaction.begin();

                // Count the contents created
                int nSubContent = 0;

                // First create the contents in one transaction
                File[] folderEntries = folder.listFiles();
                for (File entry : folderEntries) {
                    // If it is not a directory
                    if (!entry.isDirectory()) {
                        logger.debug("[ArchiveImporterJob::handleRootFolder] creating content: "
                                + entry.getName() + ", nodeRef=" + parentNodeRef + ", association="
                                + parentAssocTypeQName);
                        // Create the content
                        if (createContent(entry, parentNodeRef, contentTypeQName, contentNamePropertyQName,
                                parentAssocTypeQName)) {
                            nSubContent++;
                        }
                    }
                }

                // If 0 contents were inserted and no exception was raised, the inserted data
                // were all duplicates; in that case set bContent to true and let the
                // algorithm proceed.
                bContent = (nSubContent == 0);
                nContent += nSubContent;
                logger.debug("[ArchiveImporterJob::handleRootFolder] Content inseriti: " + nContent);

                // Calling commit with nothing to commit raises an exception.
                // TODO: handle zero-content transactions... but what should be done in that situation?
                transaction.commit();

                // If the commit raised no exception, mark content creation as successful
                bContent = true;
                logger.debug("[ArchiveImporterJob::handleRootFolder] Content bool " + bContent);
            } catch (RollbackException re) {
                try {
                    transaction.rollback();
                } catch (Exception ee) {
                    logger.debug("[ArchiveImporterJob::handleRootFolder] RollbackException");
                }
            } catch (EcmEngineFoundationException e) {
                // Rollback
                try {
                    transaction.rollback();
                } catch (Exception ee) {
                    logger.debug("[ArchiveImporterJob::handleRootFolder] EcmEngineFoundationException");
                }
            } catch (Exception e) {
                logger.debug(e);
                throw e;
            }
        }

        // If the contents were created successfully, start deleting them from disk
        boolean bDelete = false;
        if (bContent) {
            try {
                File[] folderEntries = folder.listFiles();
                for (File entry : folderEntries) {
                    // If it is not a directory
                    if (!entry.isDirectory()) {
                        // Delete the content
                        entry.delete();
                    }
                }
                bDelete = true;
            } catch (Exception e) {
                logger.debug(e);
                throw e;
            }
        }

        // If the deletes succeed, start creating the directories
        if (bDelete) {
            try {
                boolean bDeleteFolder = true;
                // For every entry in the folder
                File[] folderEntries = folder.listFiles();
                for (File entry : folderEntries) {
                    // If it is a directory
                    if (entry.isDirectory()) {
                        // Create directory
                        logger.debug("[ArchiveImporterJob::handleRootFolder] creating directory: "
                                + entry.getName() + ", nodeRef=" + parentNodeRef + ", association="
                                + parentAssocTypeQName);

                        // Reference node
                        NodeRef nr = null;

                        // Oddly, even a plain read of data explicitly requires a transaction.
                        // Get a UserTransaction object
                        UserTransaction transaction = transactionService.getNonPropagatingUserTransaction();
                        try {
                            // Begin the transaction
                            transaction.begin();
                            // Check whether the folder is already present under the parent node
                            nr = nodeService.getChildByName(parentNodeRef, parentAssocTypeQName,
                                    entry.getName());
                            // Even though nothing was modified
                            transaction.rollback();
                        } catch (Exception e) {
                            logger.debug(e);
                            throw e;
                        } finally {
                            // Even though nothing was modified
                            try {
                                transaction.rollback();
                            } catch (Exception e) {
                            }
                        }

                        // Get a UserTransaction object
                        transaction = transactionService.getNonPropagatingUserTransaction();
                        boolean bTrans = false;
                        try {
                            // If it is not present, try to create it
                            if (nr == null) {
                                bTrans = true;

                                // Prepare the properties of a folder
                                QName prefixedNameQName = resolvePrefixNameToQName("cm:" + entry.getName());
                                Map<QName, Serializable> props = new HashMap<QName, Serializable>();
                                props.put(containerNamePropertyQName, entry.getName());

                                // Begin the transaction
                                transaction.begin();
                                // Create the folder
                                ChildAssociationRef folderNodeRef = nodeService.createNode(parentNodeRef,
                                        parentAssocTypeQName, prefixedNameQName, containerTypeQName, props);
                                // Calling commit with nothing to commit raises an exception.
                                // TODO: handle zero-content transactions
                                transaction.commit();
                                nr = folderNodeRef.getChildRef();
                            }

                            // Create the subfolder; do not pass the parent assoc type, but
                            // containerAssocTypeQName for the child folders
                            nContent += handleRootFolder(entry, nr, containerAssocTypeQName,
                                    containerTypeQName, containerNamePropertyQName, containerAssocTypeQName,
                                    contentTypeQName, contentNamePropertyQName);
                        } catch (RollbackException re) {
                            if (bTrans) {
                                try {
                                    transaction.rollback();
                                } catch (Exception ee) {
                                    logger.debug(re);
                                }
                            }
                        } catch (EcmEngineFoundationException e) {
                            bDeleteFolder = false;
                            // Rollback
                            try {
                                transaction.rollback();
                            } catch (Exception ee) {
                                logger.debug(e);
                            }
                        } catch (Exception e) {
                            logger.debug(e);
                            throw e;
                        }
                    }
                }

                // Remove the directory, if there were no problems removing the subdirectories
                if (bDeleteFolder) {
                    folder.delete();
                }
            } catch (Exception e) {
                logger.debug(e);
                throw e;
            }
        }
    } catch (Exception e) {
        logger.debug(e);
        throw e;
    } finally {
        logger.debug("[ArchiveImporterJob::handleRootFolder] END");
    }
    return nContent;
}
From source file:se.alingsas.alfresco.repo.utils.byggreda.ByggRedaUtil.java
/**
 * Import a document
 *
 * @param site
 * @param sourcePath
 * @param document
 * @return
 */
private ByggRedaDocument importDocument(SiteInfo site, String sourcePath, ByggRedaDocument document) {
    final String currentDestinationPath = destinationPath + "/" + document.getPath();
    UserTransaction trx = getTransactionService().getNonPropagatingUserTransaction();
    try {
        trx.begin();
        // Check if file exists already
        FileInfo repoFileFolder = getRepoFileFolder(site,
                currentDestinationPath + "/" + document.getFileName());
        if (repoFileFolder != null) {
            if (this.updateExisting) {
                try {
                    String fullFilenamePath = checkFileExistsIgnoreExtensionCase(
                            sourcePath + "/" + document.getFileName());
                    if (fullFilenamePath == null) {
                        throw new java.io.FileNotFoundException();
                    }
                    File f = new File(fullFilenamePath);
                    if (!f.exists()) {
                        throw new java.io.FileNotFoundException();
                    }
                    LOG.debug("File " + document.getFileName()
                            + " already exists, attempting to create a new version at "
                            + currentDestinationPath);
                    final NodeRef workingCopy = checkOutCheckInService.checkout(repoFileFolder.getNodeRef());
                    addProperties(workingCopy, document, true);
                    createFile(workingCopy, site, sourcePath, document);
                    final Map<String, Serializable> properties = new HashMap<String, Serializable>();
                    properties.put(VersionModel.PROP_VERSION_TYPE, VersionType.MAJOR);
                    NodeRef checkin = checkOutCheckInService.checkin(workingCopy, properties);
                    document.setNodeRef(checkin);
                    if (checkin != null && nodeService.exists(checkin)) {
                        document.setReadSuccessfully(true);
                        document.setStatusMsg("Filen " + sourcePath + "/" + document.getFileName()
                                + " uppdaterades till ny version");
                        trx.commit();
                    } else {
                        document.setReadSuccessfully(false);
                        document.setStatusMsg("Uppdatering av fil " + currentDestinationPath + "/"
                                + document.getFileName() + " misslyckades.");
                        LOG.error(document.getStatusMsg());
                        throw new Exception(document.getStatusMsg());
                    }
                } catch (java.io.FileNotFoundException e) {
                    document.setReadSuccessfully(false);
                    document.setStatusMsg("Inläsning av fil misslyckades, filen " + sourcePath + "/"
                            + document.getFileName() + " kunde inte hittas.");
                    LOG.error(document.getStatusMsg());
                    throw new Exception(document.getStatusMsg(), e);
                } catch (FileExistsException e) {
                    document.setReadSuccessfully(false);
                    document.setStatusMsg("Inläsning av fil misslyckades, målfilen "
                            + currentDestinationPath + " finns redan.");
                    LOG.error(document.getStatusMsg());
                    throw new Exception(document.getStatusMsg(), e);
                } catch (Exception e) {
                    document.setReadSuccessfully(false);
                    document.setStatusMsg(e.getMessage());
                    LOG.error("Error importing document " + document.getRecordNumber(), e);
                    throw new Exception(document.getStatusMsg(), e);
                }
            } else {
                document.setReadSuccessfully(false);
                document.setStatusMsg("Filen existerar redan, hoppar över.");
                LOG.debug("File already exists");
                trx.commit();
            }
        } else {
            try {
                String fullFilenamePath = checkFileExistsIgnoreExtensionCase(
                        sourcePath + "/" + document.getFileName());
                if (fullFilenamePath == null) {
                    throw new java.io.FileNotFoundException();
                }
                File f = new File(fullFilenamePath);
                if (!f.exists()) {
                    throw new java.io.FileNotFoundException();
                }
                NodeRef folderNodeRef = createFolder(destinationPath, document.getPath(),
                        document.getOriginalPath(), site);
                final FileInfo fileInfo = fileFolderService.create(folderNodeRef, document.getFileName(),
                        AkDmModel.TYPE_AKDM_BYGGREDA_DOC);
                document.setNodeRef(fileInfo.getNodeRef());
                addProperties(document.getNodeRef(), document, false);
                createFile(document.getNodeRef(), site, sourcePath, document);
                createVersionHistory(document.getNodeRef());
                document.setReadSuccessfully(true);
                LOG.debug("Imported document " + document.getRecordDisplay());
                trx.commit();
            } catch (java.io.FileNotFoundException e) {
                document.setReadSuccessfully(false);
                document.setStatusMsg("Inläsning av fil misslyckades, filen " + sourcePath + "/"
                        + document.getFileName() + " kunde inte hittas.");
                LOG.error(document.getStatusMsg());
                throw new Exception(document.getStatusMsg(), e);
            } catch (FileExistsException e) {
                document.setReadSuccessfully(false);
                document.setStatusMsg("Inläsning av fil misslyckades, målfilen "
                        + currentDestinationPath + " finns redan.");
                LOG.error(document.getStatusMsg());
                throw new Exception(document.getStatusMsg(), e);
            } catch (Exception e) {
                document.setReadSuccessfully(false);
                document.setStatusMsg("Fel vid inläsning av fil, systemmeddelande: " + e.getMessage());
                LOG.error("Error importing document " + document.getRecordNumber(), e);
                throw new Exception(document.getStatusMsg(), e);
            }
        }
    } catch (Exception e) {
        try {
            if (trx.getStatus() == Status.STATUS_ACTIVE) {
                trx.rollback();
            } else {
                LOG.error("The transaction was not active", e);
            }
            return document;
        } catch (Exception e2) {
            LOG.error("Exception: ", e);
            LOG.error("Exception while rolling back transaction", e2);
            throw new RuntimeException(e2);
        }
    }
    return document;
}
From source file:com.sirma.itt.cmf.integration.alfresco3.CMFWorkflowDeployer.java
/**
 * Deploy the Workflow Definitions.
 */
public void init() {
    PropertyCheck.mandatory(this, "transactionService", transactionService);
    PropertyCheck.mandatory(this, "authenticationContext", authenticationContext);
    PropertyCheck.mandatory(this, "workflowService", workflowService);

    String currentUser = authenticationContext.getCurrentUserName();
    if (currentUser == null) {
        authenticationContext.setSystemUserAsCurrentUser();
    }
    if (!transactionService.getAllowWrite()) {
        if (logger.isWarnEnabled())
            logger.warn("Repository is in read-only mode; not deploying workflows.");
        return;
    }

    UserTransaction userTransaction = transactionService.getUserTransaction();
    try {
        userTransaction.begin();

        // bootstrap the workflow models and static labels (from classpath)
        if (models != null && resourceBundles != null
                && ((models.size() > 0) || (resourceBundles.size() > 0))) {
            DictionaryBootstrap dictionaryBootstrap = new DictionaryBootstrap();
            dictionaryBootstrap.setDictionaryDAO(dictionaryDAO);
            dictionaryBootstrap.setTenantService(tenantService);
            dictionaryBootstrap.setModels(models);
            dictionaryBootstrap.setLabels(resourceBundles);
            dictionaryBootstrap.bootstrap(); // also registers with dictionary
        }

        // bootstrap the workflow definitions (from classpath)
        if (workflowDefinitions != null) {
            for (Properties workflowDefinition : workflowDefinitions) {
                // retrieve workflow specification
                String engineId = workflowDefinition.getProperty(ENGINE_ID);
                if (engineId == null || engineId.length() == 0) {
                    throw new WorkflowException("Workflow Engine Id must be provided");
                }
                String location = workflowDefinition.getProperty(LOCATION);
                if (location == null || location.length() == 0) {
                    throw new WorkflowException("Workflow definition location must be provided");
                }
                Boolean redeploy = Boolean.valueOf(workflowDefinition.getProperty(REDEPLOY));
                String mimetype = workflowDefinition.getProperty(MIMETYPE);

                // retrieve input stream on workflow definition
                ClassPathResource workflowResource = new ClassPathResource(location);

                // deploy workflow definition
                if (!redeploy && workflowService.isDefinitionDeployed(engineId,
                        workflowResource.getInputStream(), mimetype)) {
                    if (logger.isDebugEnabled())
                        logger.debug("Workflow deployer: Definition '" + location + "' already deployed");
                } else {
                    WorkflowDeployment deployment = workflowService.deployDefinition(engineId,
                            workflowResource.getInputStream(), workflowResource.getFilename());
                    logDeployment(location, deployment);
                }
            }
        }

        userTransaction.commit();
    } catch (Throwable e) {
        // rollback the transaction
        try {
            if (userTransaction != null) {
                userTransaction.rollback();
            }
        } catch (Exception ex) {
            // NOOP
        }
    } finally {
        if (currentUser == null) {
            authenticationContext.clearCurrentSecurityContext();
        }
    }
}
From source file:it.doqui.index.ecmengine.business.job.move.MoveAggregationJob.java
private void moveIntraRepo(NodeRef sourceNodeRef, String sourceRepository, String idSourceNode,
        String idDestinationParent)
        throws NotSupportedException, SystemException, DictionaryRuntimeException, SecurityException,
        IllegalStateException, RollbackException, HeuristicMixedException, HeuristicRollbackException {
    // reclassifications (different classification headings)
    // moves (the same classification heading)
    logger.debug("[MoveAggregationJob::moveIntraRepo] BEGIN");
    Node result = null;
    String logCtx = "S: " + idSourceNode + " - D: " + idDestinationParent;
    try {
        UserTransaction userTxSource = transactionService.getNonPropagatingUserTransaction();
        userTxSource.begin();
        RepositoryManager.setCurrentRepository(sourceRepository);
        logger.debug("[MoveAggregationJob::moveIntraRepo] Spostamento da Corrente a Corrente");
        StoreRef spacesStore = new StoreRef(StoreRef.PROTOCOL_WORKSPACE, "SpacesStore");
        NodeRef destinationParentRef = new NodeRef(spacesStore, idDestinationParent);
        ChildAssociationRef sourceParentRef = nodeService.getPrimaryParent(sourceNodeRef);
        QName destinationQName = sourceParentRef.getQName();
        QName destinationAssocTypeQName = sourceParentRef.getTypeQName();
        logger.debug("[MoveAggregationJob::moveIntraRepo] Nome Nuova Associazione : "
                + destinationQName.toString());
        logger.debug("[MoveAggregationJob::moveIntraRepo] Tipo Nuova Associazione : "
                + destinationAssocTypeQName.toString());
        NodeRef copyNodeRef = copyService.copyAndRename(sourceNodeRef, destinationParentRef,
                destinationAssocTypeQName, destinationQName, true);
        //NodeRef copyNodeRef = copyService.copy(sourceNodeRef, destinationParentRef,
        //        destinationAssocTypeQName, destinationQName, true);
        result = new Node(copyNodeRef.getId());
        dumpElapsed("MoveAggregationJob", "moveIntraRepo", logCtx, "Nodo Copia creato.");
        logger.debug("[MoveAggregationJob::moveIntraRepo] Uid Nodo Copia creato: " + result.getUid());

        QName stateAspect = resolvePrefixNameToQName("ecm-sys:state");
        nodeService.removeAspect(copyNodeRef, stateAspect);
        dumpElapsed("MoveAggregationJob", "moveIntraRepo", logCtx, "Rimosso Aspect 'state' dal Nodo copiato.");
        logger.debug("[MoveAggregationJob::moveIntraRepo] Rimosso Aspect 'state' dal nodo : "
                + copyNodeRef.getId());
        QName destinationAspect = resolvePrefixNameToQName("ecm-sys:destination");
        nodeService.removeAspect(copyNodeRef, destinationAspect);
        dumpElapsed("MoveAggregationJob", "moveIntraRepo", logCtx,
                "Rimosso Aspect 'destination' dal Nodo copiato.");
        logger.debug("[MoveAggregationJob::moveIntraRepo] Rimosso Aspect 'destination' dal nodo : "
                + copyNodeRef.getId());

        // TODO: in this case (current store to current store), what should be done with
        // the source aggregation? Should reclassification and move be distinguished?
        // Apparently, for a reclassification the aggregation must remain in the source,
        // but without contents; for a move, instead, the aggregation must be moved to the
        // destination and deleted from the source.
        // Summing up:
        //   Reclassification: copyService copy method, then changes to the aggregation
        //   in the source: it takes state "R" (reclassified), is emptied of contents,
        //   and keeps its metadata.
        //   Move: nodeService moveNode method? Is that method suitable? Does it not copy
        //   the children?
        // Only reclassification is implemented here.

        // Delete the child nodes of the source node (is this enough?)
        List<ChildAssociationRef> childAssociations = nodeService.getChildAssocs(sourceNodeRef);
        int size = childAssociations != null ? childAssociations.size() : 0;
        logger.debug("[MoveAggregationJob::moveIntraRepo] Cancellare " + size + " nodi figli.");
        if (size > 0) {
            for (ChildAssociationRef childAssoc : childAssociations) {
                if (childAssoc != null) {
                    nodeService.removeChildAssociation(childAssoc);
                    logger.debug("[MoveAggregationJob::moveIntraRepo] Associazione child eliminata.");
                    dumpElapsed("MoveAggregationJob", "moveIntraRepo", logCtx, "Associazione child eliminata.");
                }
            }
        }

        // <property name="ecm-sys:stato"> is a property of the 'state' aspect
        QName stateProp = resolvePrefixNameToQName("ecm-sys:stato");
        String valoreStatoNodo = "riclassificato";
        // Set the ecm-sys:stato property of the ecm-sys:state aspect on the source node
        // to "riclassificato" (reclassified).
        // nodeService.addAspect(sourceNodeRef, stateAspect, stateAspectProps);
        // The aspect actually already exists; only the property value must be changed
        // from "spostabile" (movable) to "riclassificato" (reclassified).
        nodeService.setProperty(sourceNodeRef, stateProp, valoreStatoNodo);
        dumpElapsed("MoveAggregationJob", "moveIntraRepo", logCtx,
                "Modificata property 'stato' dell'Aspect 'state' del nodo.");
        logger.debug("[MoveAggregationJob::moveIntraRepo] Modificata property 'stato' dell'Aspect 'state' "
                + "del nodo : " + sourceNodeRef.getId());
        userTxSource.commit();

        // audit insertion
        insertAudit("MoveAggregationJob", "moveIntraRepo", logCtx, result.getUid(), "Source :"
                + sourceNodeRef.getId() + " -- Destination Parent : " + destinationParentRef.getId());
    } finally {
        logger.debug("[MoveAggregationJob::moveIntraRepo] END");
    }
}
From source file:org.alfresco.filesys.alfresco.AlfrescoTxDiskDriver.java
/**
 * End an active transaction
 *
 * @param sess SrvSession
 * @param tx Object
 */
public void endTransaction(SrvSession sess, Object tx) {
    // Check that the transaction object is valid
    if (tx == null)
        return;

    // Get the filesystem transaction
    FilesysTransaction filesysTx = (FilesysTransaction) tx;

    // Check if there is an active transaction
    if (filesysTx != null && filesysTx.hasTransaction()) {
        // Get the active transaction
        UserTransaction ftx = filesysTx.getTransaction();
        try {
            // Commit or rollback the transaction
            if (ftx.getStatus() == Status.STATUS_MARKED_ROLLBACK
                    || ftx.getStatus() == Status.STATUS_ROLLEDBACK
                    || ftx.getStatus() == Status.STATUS_ROLLING_BACK) {
                // Transaction is marked for rollback
                ftx.rollback();

                // DEBUG
                if (logger.isDebugEnabled())
                    logger.debug("End transaction (rollback)");
            } else {
                // Commit the transaction
                ftx.commit();

                // DEBUG
                if (logger.isDebugEnabled())
                    logger.debug("End transaction (commit)");
            }
        } catch (Exception ex) {
            if (logger.isDebugEnabled())
                logger.debug("Failed to end transaction, " + ex.getMessage());
            // throw new AlfrescoRuntimeException("Failed to end transaction", ex);
        } finally {
            // Clear the current transaction
            sess.clearTransaction();
        }
    }
}
From source file:org.alfresco.filesys.alfresco.AlfrescoTxDiskDriver.java
/**
 * Create and start a transaction, if not already active
 *
 * @param sess SrvSession
 * @param readOnly boolean
 * @exception AlfrescoRuntimeException
 */
private final void beginTransaction(SrvSession sess, boolean readOnly) throws AlfrescoRuntimeException {
    // Do nothing if we are already in a retrying transaction
    Boolean inRetryingTransaction = m_inRetryingTransaction.get();
    if (inRetryingTransaction != null && inRetryingTransaction) {
        return;
    }

    // Initialize the per session thread local that holds the transaction
    sess.initializeTransactionObject();

    // Get the filesystem transaction
    FilesysTransaction filesysTx = (FilesysTransaction) sess.getTransactionObject().get();
    if (filesysTx == null) {
        filesysTx = new FilesysTransaction();
        sess.getTransactionObject().set(filesysTx);
    }

    // If there is an active transaction check that it is the required type
    if (filesysTx.hasTransaction()) {
        // Get the active transaction
        UserTransaction tx = filesysTx.getTransaction();

        // Check if the current transaction is marked for rollback
        try {
            if (tx.getStatus() == Status.STATUS_MARKED_ROLLBACK
                    || tx.getStatus() == Status.STATUS_ROLLEDBACK
                    || tx.getStatus() == Status.STATUS_ROLLING_BACK) {
                // Rollback the current transaction
                tx.rollback();
            }
        } catch (Exception ex) {
        }

        // Check if the transaction is a write transaction, if write has been requested
        if (readOnly == false && filesysTx.isReadOnly() == true) {
            // Commit the read-only transaction
            try {
                tx.commit();
            } catch (Exception ex) {
                throw new AlfrescoRuntimeException(
                        "Failed to commit read-only transaction, " + ex.getMessage());
            } finally {
                // Clear the active transaction
                filesysTx.clearTransaction();
            }
        }
    }

    // Create the transaction
    if (filesysTx.hasTransaction() == false) {
        try {
            // Create a new transaction
            UserTransaction userTrans = m_transactionService.getUserTransaction(readOnly);
            userTrans.begin();

            // Store the transaction
            filesysTx.setTransaction(userTrans, readOnly);

            // DEBUG
            if (logger.isDebugEnabled())
                logger.debug("Created transaction readOnly=" + readOnly);
        } catch (Exception ex) {
            throw new AlfrescoRuntimeException("Failed to create transaction, " + ex.getMessage());
        }
    }

    // Store the transaction callback
    sess.setTransaction(this);
}