List of usage examples for java.lang.IllegalStateException.getMessage()
public String getMessage()
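Before the project examples, a minimal self-contained sketch (a hypothetical GetMessageDemo class, not drawn from any source file below) of what getMessage() returns:

public class GetMessageDemo {
    public static void main(String[] args) {
        try {
            // Simulate an object being used before it is ready.
            throw new IllegalStateException("connection is not open");
        } catch (IllegalStateException e) {
            // getMessage() returns the detail message supplied at construction,
            // or null if the exception was created without one.
            System.out.println(e.getMessage()); // prints: connection is not open
        }
    }
}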
From source file:org.finra.herd.dao.S3DaoTest.java
@Test
public void testTagVersionsAmazonServiceException() {
    // Create an S3 file transfer request parameters DTO to access S3 objects with a
    // mocked S3 bucket name that would trigger an AWS exception.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_INTERNAL_ERROR);

    // Create an S3 version summary.
    S3VersionSummary s3VersionSummary = new S3VersionSummary();
    s3VersionSummary.setKey(S3_KEY);
    s3VersionSummary.setVersionId(S3_VERSION_ID);

    // Create an S3 object tag.
    Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);

    try {
        s3Dao.tagVersions(params, new S3FileTransferRequestParamsDto(),
                Collections.singletonList(s3VersionSummary), tag);
        fail();
    } catch (IllegalStateException e) {
        assertEquals(String.format(
                "Failed to tag S3 object with \"%s\" key and \"%s\" version id in \"%s\" bucket. "
                        + "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)",
                S3_KEY, S3_VERSION_ID, MockS3OperationsImpl.MOCK_S3_BUCKET_NAME_INTERNAL_ERROR),
                e.getMessage());
    }
}
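The test above uses the common fail()/catch/assertEquals idiom to verify an exception's exact detail message. A stripped-down sketch of the same idiom, with a hypothetical Session class standing in for the herd S3 DAO:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import org.junit.Test;

public class MessageAssertionTest {

    // Hypothetical component that rejects calls made in the wrong state.
    static class Session {
        void send() {
            throw new IllegalStateException("session is closed");
        }
    }

    @Test
    public void testSendOnClosedSession() {
        try {
            new Session().send();
            fail(); // reached only if no exception was thrown
        } catch (IllegalStateException e) {
            // Verify the exact detail message via getMessage().
            assertEquals("session is closed", e.getMessage());
        }
    }
}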
From source file:org.apache.jackrabbit.core.ItemImpl.java
/**
 * {@inheritDoc}
 */
public void save() throws AccessDeniedException, ItemExistsException, ConstraintViolationException,
        InvalidItemStateException, ReferentialIntegrityException, VersionException, LockException,
        NoSuchNodeTypeException, RepositoryException {
    // check state of this instance
    sanityCheck();

    // synchronize on this session
    synchronized (session) {
        /**
         * build list of transient (i.e. new & modified) states that
         * should be persisted
         */
        Collection<ItemState> dirty = getTransientStates();
        if (dirty.size() == 0) {
            // no transient items, nothing to do here
            return;
        }

        /**
         * build list of transient descendants in the attic
         * (i.e. those marked as 'removed')
         */
        Collection<ItemState> removed = getRemovedStates();

        /**
         * build set of item id's which are within the scope of
         * (i.e. affected by) this save operation
         */
        Set<ItemId> affectedIds = new HashSet<ItemId>(dirty.size() + removed.size());
        for (Iterator<ItemState> it = new IteratorChain(dirty.iterator(), removed.iterator()); it.hasNext();) {
            affectedIds.add(it.next().getId());
        }

        /**
         * make sure that this save operation is totally 'self-contained'
         * and independent; items within the scope of this save operation
         * must not have 'external' dependencies;
         * (e.g. moving a node requires that the target node including both
         * old and new parents are saved)
         */
        for (Iterator<ItemState> it = new IteratorChain(dirty.iterator(), removed.iterator()); it.hasNext();) {
            ItemState transientState = it.next();
            if (transientState.isNode()) {
                NodeState nodeState = (NodeState) transientState;
                Set<NodeId> dependentIDs = new HashSet<NodeId>();
                if (nodeState.hasOverlayedState()) {
                    NodeState overlayedState = (NodeState) nodeState.getOverlayedState();
                    NodeId oldParentId = overlayedState.getParentId();
                    NodeId newParentId = nodeState.getParentId();
                    if (oldParentId != null) {
                        if (newParentId == null) {
                            // node has been removed, add old parents
                            // to dependencies
                            if (overlayedState.isShareable()) {
                                dependentIDs.addAll(overlayedState.getSharedSet());
                            } else {
                                dependentIDs.add(oldParentId);
                            }
                        } else {
                            if (!oldParentId.equals(newParentId)) {
                                // node has been moved to a new location,
                                // add old and new parent to dependencies
                                dependentIDs.add(oldParentId);
                                dependentIDs.add(newParentId);
                            } else {
                                // parent id hasn't changed, check whether
                                // the node has been renamed (JCR-1034)
                                if (!affectedIds.contains(newParentId)
                                        && stateMgr.hasTransientItemState(newParentId)) {
                                    try {
                                        NodeState parent = (NodeState) stateMgr
                                                .getTransientItemState(newParentId);
                                        // check parent's renamed child node entries
                                        for (Iterator<ChildNodeEntry> cneIt = parent
                                                .getRenamedChildNodeEntries().iterator(); cneIt.hasNext();) {
                                            ChildNodeEntry cne = cneIt.next();
                                            if (cne.getId().equals(nodeState.getId())) {
                                                // node has been renamed,
                                                // add parent to dependencies
                                                dependentIDs.add(newParentId);
                                            }
                                        }
                                    } catch (ItemStateException ise) {
                                        // should never get here
                                        log.warn("failed to retrieve transient state: " + newParentId, ise);
                                    }
                                }
                            }
                        }
                    }
                }

                // removed child node entries
                for (Iterator<ChildNodeEntry> cneIt = nodeState.getRemovedChildNodeEntries().iterator();
                        cneIt.hasNext();) {
                    ChildNodeEntry cne = cneIt.next();
                    dependentIDs.add(cne.getId());
                }
                // added child node entries
                for (Iterator<ChildNodeEntry> cneIt = nodeState.getAddedChildNodeEntries().iterator();
                        cneIt.hasNext();) {
                    ChildNodeEntry cne = cneIt.next();
                    dependentIDs.add(cne.getId());
                }

                // now walk through dependencies and check whether they
                // are within the scope of this save operation
                Iterator<NodeId> depIt = dependentIDs.iterator();
                while (depIt.hasNext()) {
                    NodeId id = depIt.next();
                    if (!affectedIds.contains(id)) {
                        // JCR-1359 workaround: check whether unresolved
                        // dependencies originate from 'this' session;
                        // otherwise ignore them
                        if (stateMgr.hasTransientItemState(id)
                                || stateMgr.hasTransientItemStateInAttic(id)) {
                            // need to save dependency as well
                            String msg = itemMgr.safeGetJCRPath(id) + " needs to be saved as well.";
                            log.debug(msg);
                            throw new ConstraintViolationException(msg);
                        }
                    }
                }
            }
        }

        /**
         * validate access and node type constraints
         * (this will also validate child removals)
         */
        validateTransientItems(dirty.iterator(), removed.iterator());

        // start the update operation
        try {
            stateMgr.edit();
        } catch (IllegalStateException e) {
            String msg = "Unable to start edit operation";
            log.debug(msg);
            throw new RepositoryException(msg, e);
        }

        boolean succeeded = false;

        try {
            // process transient items marked as 'removed'
            removeTransientItems(removed.iterator());

            // process transient items that have changes in mixins
            processShareableNodes(dirty.iterator());

            // initialize version histories for new nodes (might generate new transient state)
            if (initVersionHistories(dirty.iterator())) {
                // re-build the list of transient states because the previous call
                // generated new transient state
                dirty = getTransientStates();
            }

            // process 'new' or 'modified' transient states
            persistTransientItems(dirty.iterator());

            // dispose the transient states marked 'new' or 'modified'
            // at this point item state data is pushed down one level,
            // node instances are disconnected from the transient
            // item state and connected to the 'overlayed' item state.
            // transient item states must be removed now. otherwise
            // the session item state provider will return an orphaned
            // item state which is not referenced by any node instance.
            for (Iterator<ItemState> it = dirty.iterator(); it.hasNext();) {
                ItemState transientState = it.next();
                // dispose the transient state, it is no longer used
                stateMgr.disposeTransientItemState(transientState);
            }

            // end update operation
            stateMgr.update();
            // update operation succeeded
            succeeded = true;
        } catch (StaleItemStateException e) {
            throw new InvalidItemStateException(e.getMessage());
        } catch (ItemStateException e) {
            throw new RepositoryException("Unable to update item: " + this, e);
        } finally {
            if (!succeeded) {
                // update operation failed, cancel all modifications
                stateMgr.cancel();

                // JCR-288: if an exception has been thrown during
                // update() the transient changes have already been
                // applied by persistTransientItems() and we need to
                // restore transient state, i.e. undo the effect of
                // persistTransientItems()
                restoreTransientItems(dirty.iterator());
            }
        }

        // now it is safe to dispose the transient states:
        // dispose the transient states marked 'removed'.
        // item states in attic are removed after store, because
        // the observation mechanism needs to build paths of removed
        // items in store().
        for (Iterator<ItemState> it = removed.iterator(); it.hasNext();) {
            ItemState transientState = it.next();
            // dispose the transient state, it is no longer used
            stateMgr.disposeTransientItemStateInAttic(transientState);
        }
    }
}
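This save() implementation uses getMessage() twice: the IllegalStateException from stateMgr.edit() is wrapped in a RepositoryException with a new message, and the StaleItemStateException's text is carried into an InvalidItemStateException. A minimal sketch of that wrap-and-retrieve pattern, using a hypothetical RepositoryException class rather than the JCR one:

public class ExceptionTranslationDemo {

    // Hypothetical checked exception used by the public API.
    static class RepositoryException extends Exception {
        RepositoryException(String message, Throwable cause) {
            super(message, cause);
        }
    }

    static void edit() {
        // Lower layer signals a state problem with an unchecked exception.
        throw new IllegalStateException("editing already in progress");
    }

    static void save() throws RepositoryException {
        try {
            edit();
        } catch (IllegalStateException e) {
            // Translate to the API's checked exception; the original message
            // remains reachable via getCause().getMessage().
            throw new RepositoryException("Unable to start edit operation", e);
        }
    }

    public static void main(String[] args) {
        try {
            save();
        } catch (RepositoryException e) {
            System.out.println(e.getMessage());            // Unable to start edit operation
            System.out.println(e.getCause().getMessage()); // editing already in progress
        }
    }
}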
From source file:org.opentravel.schemacompiler.repository.RepositoryManager.java
/**
 * @see org.opentravel.schemacompiler.repository.Repository#publish(java.io.InputStream,
 *      java.lang.String, java.lang.String, java.lang.String, java.lang.String,
 *      java.lang.String, org.opentravel.schemacompiler.model.TLLibraryStatus)
 */
@Override
public RepositoryItem publish(InputStream unmanagedContent, String filename, String libraryName,
        String namespace, String versionIdentifier, String versionScheme, TLLibraryStatus initialStatus)
        throws RepositoryException {
    String targetNS = RepositoryNamespaceUtils.normalizeUri(namespace);
    boolean success = false;
    try {
        log.info("Publishing '" + filename + "' to namespace '" + targetNS + "'");
        fileManager.startChangeSet();

        // Check to see if the library has already been published to the repository
        String baseNamespace = targetNS;
        if (versionScheme != null) {
            VersionScheme vScheme = VersionSchemeFactory.getInstance().getVersionScheme(versionScheme);
            if (vScheme.isValidNamespace(targetNS)) {
                baseNamespace = vScheme.getBaseNamespace(targetNS);
            } else {
                throw new RepositoryException("Cannot publish '" + filename
                        + "' because its namespace is not valid for the assigned version scheme.");
            }
        }
        try {
            fileManager.loadLibraryMetadata(baseNamespace, filename, versionIdentifier);
            throw new IllegalStateException();
        } catch (IllegalStateException e) {
            throw new RepositoryException("Unable to publish - the library '" + filename
                    + "' has already been published to a repository.");
        } catch (RepositoryException e) {
            // Happy path - the item must not yet exist in the repository
        }

        // Create the namespace folder (if it does not already exist)
        fileManager.createNamespaceIdFiles(baseNamespace);

        // Save the library meta-data file
        LibraryInfoType libraryMetadata = new LibraryInfoType();
        libraryMetadata.setNamespace(targetNS);
        libraryMetadata.setBaseNamespace(baseNamespace);
        libraryMetadata.setFilename(filename);
        libraryMetadata.setLibraryName(libraryName);
        libraryMetadata.setVersion(versionIdentifier);
        libraryMetadata.setVersionScheme(versionScheme);
        libraryMetadata.setStatus(initialStatus.toRepositoryStatus());
        libraryMetadata.setState(RepositoryState.MANAGED_UNLOCKED);
        libraryMetadata.setOwningRepository(localRepositoryId);

        File metadataFile = fileManager.saveLibraryMetadata(libraryMetadata);
        log.info("Library metadata saved: " + metadataFile.getAbsolutePath());
        File contentFile = new File(metadataFile.getParent(), filename);

        // Save the library content
        fileManager.saveFile(contentFile, unmanagedContent);
        log.info("Library content saved: " + contentFile.getAbsolutePath());

        // Build and return the repository item to represent the content we just published
        RepositoryItem publishedItem = RepositoryUtils.createRepositoryItem(this, libraryMetadata);
        success = true;
        log.info("Content '" + filename + "' published successfully to namespace '" + baseNamespace + "'");
        return publishedItem;

    } catch (VersionSchemeException e) {
        throw new RepositoryException(e.getMessage(), e);
    } finally {
        // Close all streams
        try {
            if (unmanagedContent != null)
                unmanagedContent.close();
        } catch (Throwable t) {
        }

        // Commit or roll back the changes based on the result of the operation
        if (success) {
            fileManager.commitChangeSet();
        } else {
            try {
                fileManager.rollbackChangeSet();
            } catch (Throwable t) {
                log.error("Error rolling back the current change set.", t);
            }
        }
    }
}
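Here the getMessage() call propagates a VersionSchemeException's text verbatim into the RepositoryException thrown to callers. A self-contained sketch of that message-propagation style, with hypothetical exception classes standing in for the schema-compiler types:

public class MessagePropagationDemo {

    // Hypothetical checked exceptions standing in for the repository API's types.
    static class VersionSchemeException extends Exception {
        VersionSchemeException(String message) { super(message); }
    }

    static class RepositoryException extends Exception {
        RepositoryException(String message, Throwable cause) { super(message, cause); }
    }

    static void validate(String scheme) throws VersionSchemeException {
        if (!"OTA2".equals(scheme)) {
            throw new VersionSchemeException("Unknown version scheme: " + scheme);
        }
    }

    static void publish(String scheme) throws RepositoryException {
        try {
            validate(scheme);
        } catch (VersionSchemeException e) {
            // Reuse the lower-level message verbatim so callers see the same text.
            throw new RepositoryException(e.getMessage(), e);
        }
    }

    public static void main(String[] args) {
        try {
            publish("v1");
        } catch (RepositoryException e) {
            System.out.println(e.getMessage()); // Unknown version scheme: v1
        }
    }
}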
From source file:org.apache.accumulo.server.tabletserver.Tablet.java
public TreeMap<KeyExtent, SplitInfo> split(byte[] sp) throws IOException {

    if (sp != null && extent.getEndRow() != null && extent.getEndRow().equals(new Text(sp))) {
        throw new IllegalArgumentException();
    }

    if (extent.isRootTablet()) {
        String msg = "Cannot split root tablet";
        log.warn(msg);
        throw new RuntimeException(msg);
    }

    try {
        initiateClose(true, false, false);
    } catch (IllegalStateException ise) {
        log.debug("File " + extent + " not splitting : " + ise.getMessage());
        return null;
    }

    // obtain this info outside of synch block since it will involve opening
    // the map files... it is ok if the set of map files changes, because
    // this info is used for optimization... it is ok if map files are missing
    // from the set... can still query and insert into the tablet while this
    // map file operation is happening
    Map<FileRef, FileUtil.FileInfo> firstAndLastRows = FileUtil.tryToGetFirstAndLastRows(fs,
            tabletServer.getSystemConfiguration(), datafileManager.getFiles());

    synchronized (this) {
        // java needs tuples ...
        TreeMap<KeyExtent, SplitInfo> newTablets = new TreeMap<KeyExtent, SplitInfo>();

        long t1 = System.currentTimeMillis();

        // choose a split point
        SplitRowSpec splitPoint;
        if (sp == null)
            splitPoint = findSplitRow(datafileManager.getFiles());
        else {
            Text tsp = new Text(sp);
            splitPoint = new SplitRowSpec(
                    FileUtil.estimatePercentageLTE(fs, tabletServer.getSystemConfiguration(),
                            extent.getPrevEndRow(), extent.getEndRow(), datafileManager.getFiles(), tsp),
                    tsp);
        }

        if (splitPoint == null || splitPoint.row == null) {
            log.info("had to abort split because splitRow was null");
            closing = false;
            return null;
        }

        closed = true;
        completeClose(true, false);

        Text midRow = splitPoint.row;
        double splitRatio = splitPoint.splitRatio;

        KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
        KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);

        String lowDirectory = TabletOperations.createTabletDirectory(fs, extent.getTableId().toString(), midRow);

        // write new tablet information to MetadataTable
        SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<FileRef, DataFileValue>();
        SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<FileRef, DataFileValue>();
        List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();

        MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, firstAndLastRows,
                datafileManager.getDatafileSizes(), lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);

        log.debug("Files for low split " + low + " " + lowDatafileSizes.keySet());
        log.debug("Files for high split " + high + " " + highDatafileSizes.keySet());

        String time = tabletTime.getMetadataValue();

        // it is possible that some of the bulk loading flags will be deleted after being read
        // below because the bulk load finishes.... therefore split could propagate load flags
        // for a finished bulk load... there is a special iterator on the !METADATA table to
        // clean up this type of garbage
        Map<FileRef, Long> bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(),
                extent);

        MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get(),
                tabletServer.getLock());
        MetadataTableUtil.addNewTablet(low, lowDirectory, tabletServer.getTabletSession(), lowDatafileSizes,
                bulkLoadedFiles, SystemCredentials.get(), time, lastFlushID, lastCompactID,
                tabletServer.getLock());
        MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get(),
                tabletServer.getLock());

        log.log(TLevel.TABLET_HIST, extent + " split " + low + " " + high);

        newTablets.put(high, new SplitInfo(tabletDirectory, highDatafileSizes, time, lastFlushID, lastCompactID));
        newTablets.put(low, new SplitInfo(lowDirectory, lowDatafileSizes, time, lastFlushID, lastCompactID));

        long t2 = System.currentTimeMillis();

        log.debug(String.format("offline split time : %6.2f secs", (t2 - t1) / 1000.0));

        closeComplete = true;

        return newTablets;
    }
}
From source file:org.apache.accumulo.tserver.Tablet.java
public TreeMap<KeyExtent, SplitInfo> split(byte[] sp) throws IOException {

    if (sp != null && extent.getEndRow() != null && extent.getEndRow().equals(new Text(sp))) {
        throw new IllegalArgumentException();
    }

    if (extent.isRootTablet()) {
        String msg = "Cannot split root tablet";
        log.warn(msg);
        throw new RuntimeException(msg);
    }

    try {
        initiateClose(true, false, false);
    } catch (IllegalStateException ise) {
        log.debug("File " + extent + " not splitting : " + ise.getMessage());
        return null;
    }

    // obtain this info outside of synch block since it will involve opening
    // the map files... it is ok if the set of map files changes, because
    // this info is used for optimization... it is ok if map files are missing
    // from the set... can still query and insert into the tablet while this
    // map file operation is happening
    Map<FileRef, FileUtil.FileInfo> firstAndLastRows = FileUtil.tryToGetFirstAndLastRows(fs,
            tabletServer.getSystemConfiguration(), datafileManager.getFiles());

    synchronized (this) {
        // java needs tuples ...
        TreeMap<KeyExtent, SplitInfo> newTablets = new TreeMap<KeyExtent, SplitInfo>();

        long t1 = System.currentTimeMillis();

        // choose a split point
        SplitRowSpec splitPoint;
        if (sp == null)
            splitPoint = findSplitRow(datafileManager.getFiles());
        else {
            Text tsp = new Text(sp);
            splitPoint = new SplitRowSpec(
                    FileUtil.estimatePercentageLTE(fs, tabletServer.getSystemConfiguration(),
                            extent.getPrevEndRow(), extent.getEndRow(),
                            FileUtil.toPathStrings(datafileManager.getFiles()), tsp),
                    tsp);
        }

        if (splitPoint == null || splitPoint.row == null) {
            log.info("had to abort split because splitRow was null");
            closing = false;
            return null;
        }

        closed = true;
        completeClose(true, false);

        Text midRow = splitPoint.row;
        double splitRatio = splitPoint.splitRatio;

        KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
        KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);

        String lowDirectory = TabletOperations.createTabletDirectory(fs, extent.getTableId().toString(), midRow);

        // write new tablet information to MetadataTable
        SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<FileRef, DataFileValue>();
        SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<FileRef, DataFileValue>();
        List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();

        MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, firstAndLastRows,
                datafileManager.getDatafileSizes(), lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);

        log.debug("Files for low split " + low + " " + lowDatafileSizes.keySet());
        log.debug("Files for high split " + high + " " + highDatafileSizes.keySet());

        String time = tabletTime.getMetadataValue();

        // it is possible that some of the bulk loading flags will be deleted after being read
        // below because the bulk load finishes.... therefore split could propagate load flags
        // for a finished bulk load... there is a special iterator on the metadata table to
        // clean up this type of garbage
        Map<FileRef, Long> bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(),
                extent);

        MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get(),
                tabletServer.getLock());
        MasterMetadataUtil.addNewTablet(low, lowDirectory, tabletServer.getTabletSession(), lowDatafileSizes,
                bulkLoadedFiles, SystemCredentials.get(), time, lastFlushID, lastCompactID,
                tabletServer.getLock());
        MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get(),
                tabletServer.getLock());

        log.log(TLevel.TABLET_HIST, extent + " split " + low + " " + high);

        newTablets.put(high, new SplitInfo(tabletDirectory, highDatafileSizes, time, lastFlushID, lastCompactID,
                lastLocation));
        newTablets.put(low, new SplitInfo(lowDirectory, lowDatafileSizes, time, lastFlushID, lastCompactID,
                lastLocation));

        long t2 = System.currentTimeMillis();

        log.debug(String.format("offline split time : %6.2f secs", (t2 - t1) / 1000.0));

        closeComplete = true;

        return newTablets;
    }
}
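In both split() variants, getMessage() feeds a debug log line and the method recovers by returning null rather than letting the exception propagate. A minimal sketch of this log-and-recover pattern, with hypothetical names throughout:

public class LogAndRecoverDemo {

    // Hypothetical precondition check that rejects an already-closing resource.
    static void initiateClose(boolean closing) {
        if (closing) {
            throw new IllegalStateException("close is already in progress");
        }
    }

    static String split(boolean closing) {
        try {
            initiateClose(closing);
        } catch (IllegalStateException ise) {
            // Record why the operation was skipped, then recover gracefully.
            System.out.println("not splitting : " + ise.getMessage());
            return null;
        }
        return "split performed";
    }

    public static void main(String[] args) {
        System.out.println(split(true));  // logs the message, then prints null
        System.out.println(split(false)); // prints: split performed
    }
}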
From source file:org.lockss.subscription.SubscriptionManager.java
/**
 * Performs the necessary processing for an archival unit that appears in the
 * configuration changeset.
 *
 * @param tdbAu
 *          A TdbAu for the archival unit to be processed.
 * @param conn
 *          A Connection with the database connection to be used.
 * @param isFirstRun
 *          A boolean with <code>true</code> if this is the first run of the
 *          subscription manager, <code>false</code> otherwise.
 * @param config
 *          A Configuration to which to add the archival unit configuration.
 * @throws DbException
 *           if any problem occurred accessing the database.
 */
private void processNewTdbAu(TdbAu tdbAu, Connection conn, boolean isFirstRun, Configuration config)
        throws DbException {
    final String DEBUG_HEADER = "processNewTdbAu(): ";
    if (log.isDebug2()) {
        log.debug2(DEBUG_HEADER + "tdbAu = " + tdbAu);
        log.debug2(DEBUG_HEADER + "isFirstRun = " + isFirstRun);
        log.debug2(DEBUG_HEADER + "config = " + config);
    }

    // Get the archival unit identifier.
    String auId;

    try {
        auId = tdbAu.getAuId(pluginManager);
        if (log.isDebug3()) log.debug3(DEBUG_HEADER + "auId = " + auId);
    } catch (IllegalStateException ise) {
        log.debug2("Ignored " + tdbAu + " because of problems getting its identifier: " + ise.getMessage());
        return;
    } catch (RuntimeException re) {
        log.error("Ignored " + tdbAu + " because of problems getting its identifier: " + re.getMessage());
        return;
    }

    // Check whether the archival unit is already configured.
    if (pluginManager.getAuFromId(auId) != null) {
        // Yes: Nothing more to do.
        if (log.isDebug3()) log.debug3(DEBUG_HEADER + "TdbAu '" + tdbAu + "' is already configured.");
        return;
    }

    // Check whether this is the first run of the subscription manager.
    if (isFirstRun) {
        // Yes: Add the archival unit to the table of unconfigured archival units.
        mdManager.persistUnconfiguredAu(conn, auId);
        if (log.isDebug2()) log.debug2(DEBUG_HEADER + "Done.");
        return;
    }

    // Nothing to do if the archival unit is in the table of unconfigured
    // archival units already.
    if (mdManager.isAuInUnconfiguredAuTable(conn, auId)) {
        if (log.isDebug2()) log.debug2(DEBUG_HEADER + "Done.");
        return;
    }

    if (log.isDebug3()) {
        log.debug3(DEBUG_HEADER + "currentTdbTitle = " + currentTdbTitle);
        log.debug3(DEBUG_HEADER + "tdbAu.getTdbTitle() = " + tdbAu.getTdbTitle());
    }

    // Check whether this archival unit belongs to a different title than the
    // previous archival unit processed.
    if (!tdbAu.getTdbTitle().equals(currentTdbTitle)) {
        // Yes: Update the title data for this archival unit.
        currentTdbTitle = tdbAu.getTdbTitle();

        // Get the subscription ranges for the archival unit title.
        currentSubscribedRanges = new ArrayList<BibliographicPeriod>();
        currentUnsubscribedRanges = new ArrayList<BibliographicPeriod>();
        populateTitleSubscriptionRanges(conn, currentTdbTitle, currentSubscribedRanges,
                currentUnsubscribedRanges);

        // Get the archival units covered by the subscription.
        currentCoveredTdbAus = getCoveredTdbAus(currentTdbTitle, currentSubscribedRanges,
                currentUnsubscribedRanges);
    } else {
        // No: Reuse the title data from the previous archival unit.
        if (log.isDebug3()) log.debug3(DEBUG_HEADER + "Reusing data from title = " + currentTdbTitle);
    }

    // Check whether the archival unit covers a subscribed range and it does not
    // cover any unsubscribed range.
    if (currentCoveredTdbAus.contains(tdbAu)) {
        // Yes: Add the archival unit configuration to those to be configured.
        config = addAuConfiguration(tdbAu, auId, config);
    } else {
        // No: Add it to the table of unconfigured archival units.
        mdManager.persistUnconfiguredAu(conn, auId);
    }

    if (log.isDebug2()) log.debug2(DEBUG_HEADER + "Done.");
    return;
}
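processNewTdbAu() logs the expected IllegalStateException at debug level but escalates any other RuntimeException to an error, reading getMessage() in both branches. A compact sketch of that severity-by-exception-type idiom, with a hypothetical computeId() in place of TdbAu.getAuId():

public class SeverityByTypeDemo {

    // Hypothetical lookup that can fail in an expected or an unexpected way.
    static String computeId(String input) {
        if (input == null) {
            throw new IllegalStateException("record not yet resolvable"); // expected, routine
        }
        if (input.isEmpty()) {
            throw new NumberFormatException("empty identifier"); // unexpected
        }
        return "id:" + input;
    }

    static void process(String input) {
        try {
            System.out.println(computeId(input));
        } catch (IllegalStateException ise) {
            // Expected condition: note it quietly and skip the record.
            System.out.println("DEBUG ignored record: " + ise.getMessage());
        } catch (RuntimeException re) {
            // Anything else is a real problem: log it loudly.
            System.out.println("ERROR ignored record: " + re.getMessage());
        }
    }

    public static void main(String[] args) {
        process("a1");  // id:a1
        process(null);  // DEBUG ignored record: record not yet resolvable
        process("");    // ERROR ignored record: empty identifier
    }
}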
From source file:org.apache.tinkerpop.gremlin.hadoop.process.computer.giraph.GiraphGraphComputer.java
@Override
public int run(final String[] args) {
    this.giraphConfiguration.setBoolean(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT_HAS_EDGES,
            this.persist.equals(Persist.EDGES));
    try {
        // it is possible to run graph computer without a vertex program (and thus, only map reduce jobs if they exist)
        if (null != this.vertexProgram) {
            // a way to verify in Giraph whether the traversal will go over the wire or not
            try {
                VertexProgram.createVertexProgram(this.hadoopGraph,
                        ConfUtil.makeApacheConfiguration(this.giraphConfiguration));
            } catch (IllegalStateException e) {
                if (e.getCause() instanceof NumberFormatException)
                    throw new NotSerializableException(
                            "The provided traversal is not serializable and thus, can not be distributed across the cluster");
            }
            // prepare the giraph vertex-centric computing job
            final GiraphJob job = new GiraphJob(this.giraphConfiguration,
                    Constants.GREMLIN_HADOOP_GIRAPH_JOB_PREFIX + this.vertexProgram);
            // handle input paths (if any)
            if (FileInputFormat.class.isAssignableFrom(this.giraphConfiguration
                    .getClass(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT, InputFormat.class))) {
                final Path inputPath = new Path(
                        this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION));
                if (!FileSystem.get(this.giraphConfiguration).exists(inputPath))
                    // TODO: what about when the input is not a file input?
                    throw new IllegalArgumentException("The provided input path does not exist: " + inputPath);
                FileInputFormat.setInputPaths(job.getInternalJob(), inputPath);
            }
            // handle output paths
            final Path outputPath = new Path(
                    this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/"
                            + Constants.HIDDEN_G);
            FileOutputFormat.setOutputPath(job.getInternalJob(), outputPath);
            job.getInternalJob().setJarByClass(GiraphGraphComputer.class);
            this.logger.info(Constants.GREMLIN_HADOOP_GIRAPH_JOB_PREFIX + this.vertexProgram);
            // execute the job and wait until it completes (if it fails, throw an exception)
            if (!job.run(true))
                throw new IllegalStateException(
                        "The GiraphGraphComputer job failed -- aborting all subsequent MapReduce jobs");
            // how do I get the exception that occurred?

            // add vertex program memory values to the return memory
            for (final String key : this.vertexProgram.getMemoryComputeKeys()) {
                final Path path = new Path(
                        this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/" + key);
                final ObjectWritableIterator iterator = new ObjectWritableIterator(this.giraphConfiguration, path);
                if (iterator.hasNext()) {
                    this.memory.set(key, iterator.next().getValue());
                }
                FileSystem.get(this.giraphConfiguration).delete(path, true);
            }
            final Path path = new Path(this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION)
                    + "/" + Constants.HIDDEN_ITERATION);
            this.memory.setIteration(
                    (Integer) new ObjectWritableIterator(this.giraphConfiguration, path).next().getValue());
            FileSystem.get(this.giraphConfiguration).delete(path, true);
        }
        // do map reduce jobs
        this.giraphConfiguration.setBoolean(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT_HAS_EDGES,
                this.giraphConfiguration.getBoolean(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT_HAS_EDGES, true));
        for (final MapReduce mapReduce : this.mapReducers) {
            this.memory.addMapReduceMemoryKey(mapReduce);
            MapReduceHelper.executeMapReduceJob(mapReduce, this.memory, this.giraphConfiguration);
        }
        // if no persistence, delete the map reduce output
        if (this.persist.equals(Persist.NOTHING)) {
            final Path outputPath = new Path(
                    this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/"
                            + Constants.HIDDEN_G);
            if (FileSystem.get(this.giraphConfiguration).exists(outputPath))
                // TODO: what about when the output is not a file output?
                FileSystem.get(this.giraphConfiguration).delete(outputPath, true);
        }
    } catch (final Exception e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
    return 0;
}
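This run() method both inspects getCause() to pick a reaction and, in its outer catch, rewraps any failure as IllegalStateException(e.getMessage(), e) so the message survives the unchecked rethrow. A minimal sketch of both moves, with hypothetical names:

public class RewrapAndInspectDemo {

    // Hypothetical parser whose IllegalStateException may carry a more specific cause.
    static void parse(String value) {
        try {
            Integer.parseInt(value);
        } catch (NumberFormatException nfe) {
            throw new IllegalStateException("unable to interpret value", nfe);
        }
    }

    static int run(String value) {
        try {
            parse(value);
            return 0;
        } catch (IllegalStateException e) {
            // Inspect the cause to distinguish one failure mode from another.
            if (e.getCause() instanceof NumberFormatException) {
                System.out.println("not a number: " + e.getCause().getMessage());
            }
            // Rewrap, carrying the original message forward unchanged.
            throw new IllegalStateException(e.getMessage(), e);
        }
    }

    public static void main(String[] args) {
        try {
            run("forty-two");
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // unable to interpret value
        }
    }
}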