List of usage examples for javax.persistence PersistenceException PersistenceException
public PersistenceException(Throwable cause)
Constructs a new PersistenceException exception with the specified cause.
From source file: com.impetus.client.cassandra.pelops.PelopsClient.java
/**
 * Deletes the row identified by {@code columnValue} from {@code tableName}.
 *
 * @param tableName   column family to delete from
 * @param columnName  currently unused — NOTE(review): the whole row keyed by
 *                    {@code columnValue} is removed, not a single column;
 *                    confirm this matches caller expectations
 * @param columnValue row key of the row to delete
 * @throws PersistenceException if this client has already been closed
 */
public void deleteByColumn(String tableName, String columnName, Object columnValue) {
    // Guard: mutations are only legal while the client is open.
    if (!isOpen()) {
        throw new PersistenceException("PelopsClient is closed.");
    }
    String poolName = PelopsUtils.generatePoolName(getPersistenceUnit());
    RowDeletor deletor = Pelops.createRowDeletor(poolName);
    deletor.deleteRow(tableName, columnValue.toString(), consistencyLevel);
}
From source file:com.impetus.kundera.utils.KunderaCoreUtils.java
private static void prepareLuceneQueryForPartitionKey(LuceneQueryBuilder queryBuilder, Object key, MetamodelImpl metaModel, String indexName, Class valueClazz) { Field[] fields = key.getClass().getDeclaredFields(); EmbeddableType embeddable = metaModel.embeddable(key.getClass()); boolean appendAnd = false; try {//from www. j a v a2 s . c o m for (int i = 0; i < fields.length; i++) { if (!ReflectUtils.isTransientOrStatic(fields[i])) { if (metaModel.isEmbeddable(((AbstractAttribute) embeddable.getAttribute(fields[i].getName())) .getBindableJavaType())) { fields[i].setAccessible(true); prepareLuceneQueryForPartitionKey(queryBuilder, fields[i].get(key), metaModel, indexName, valueClazz); } else { if (appendAnd) { queryBuilder.buildQuery("AND", "AND", String.class); } appendAnd = true; String fieldValue = PropertyAccessorHelper.getString(key, fields[i]); fieldValue = fieldValue.replaceAll("[^a-zA-Z0-9]", "_"); queryBuilder.appendIndexName(indexName).appendPropertyName(fields[i].getName()) .buildQuery("=", fieldValue, valueClazz); } } } } catch (IllegalArgumentException e) { logger.error("Error during prepare composite key, Caused by {}.", e); throw new PersistenceException(e); } catch (IllegalAccessException e) { logger.error(e.getMessage()); } }
From source file:com.impetus.client.neo4j.Neo4JClient.java
/**
 * Writes an entity (and its relationships) to the Neo4J database.
 * Must be invoked inside an active transaction; every created node is
 * registered on the transaction resource so later relationships can
 * target it.
 *
 * @param entityMetadata the entity metadata
 * @param entity         the entity instance being persisted
 * @param id             the entity's primary key
 * @param rlHolders      relationships this entity participates in
 */
@Override
protected void onPersist(EntityMetadata entityMetadata, Object entity, Object id,
        List<RelationHolder> rlHolders) {
    if (log.isDebugEnabled())
        log.debug("Persisting " + entity);
    // All Modifying Neo4J operations must be executed within a transaction
    checkActiveTransaction();
    GraphDatabaseService graphDb = getConnection();
    try {
        // Top level node (null when the mapper produced nothing to write).
        Node node = mapper.getNodeFromEntity(entity, id, graphDb, entityMetadata, isUpdate);
        if (node != null) {
            MetamodelImpl metamodel = (MetamodelImpl) kunderaMetadata.getApplicationMetadata()
                    .getMetamodel(getPersistenceUnit());
            // Track this node so sibling entities in the same transaction can link to it.
            ((Neo4JTransaction) resource).addProcessedNode(id, node);
            if (!rlHolders.isEmpty()) {
                for (RelationHolder rh : rlHolders) {
                    // Search Node (to be connected to) in Neo4J graph
                    EntityMetadata targetNodeMetadata = KunderaMetadataManager
                            .getEntityMetadata(kunderaMetadata, rh.getRelationValue().getClass());
                    Object targetNodeKey = PropertyAccessorHelper.getId(rh.getRelationValue(),
                            targetNodeMetadata);
                    // Node targetNode = mapper.searchNode(targetNodeKey,
                    // targetNodeMetadata, graphDb);
                    Node targetNode = null; // Target node connected through relationship
                    /*
                     * If the relationship is with an entity in Neo4J, the target node
                     * must already have been created: fetch it from the processed
                     * nodes and add edges to it. Otherwise (entity stored in another
                     * database) create a "proxy node" pointing at the row in the
                     * other database; its key equals that row's primary key.
                     */
                    if (isEntityForNeo4J(targetNodeMetadata)) {
                        targetNode = ((Neo4JTransaction) resource).getProcessedNode(targetNodeKey);
                    } else {
                        // Create Proxy nodes for insert requests
                        if (!isUpdate) {
                            targetNode = mapper.createProxyNode(id, targetNodeKey, graphDb, entityMetadata,
                                    targetNodeMetadata);
                        }
                    }
                    if (targetNode != null) {
                        // Join this node (source node) to target node via relationship
                        DynamicRelationshipType relType = DynamicRelationshipType
                                .withName(rh.getRelationName());
                        Relationship relationship = node.createRelationshipTo(targetNode, relType);
                        // Populate relationship's own properties into it
                        Object relationshipObj = rh.getRelationVia();
                        if (relationshipObj != null) {
                            mapper.populateRelationshipProperties(entityMetadata, targetNodeMetadata,
                                    relationship, relationshipObj);
                            // After relationship creation, manually index it if desired
                            EntityMetadata relationMetadata = KunderaMetadataManager
                                    .getEntityMetadata(kunderaMetadata, relationshipObj.getClass());
                            if (!isUpdate) {
                                indexer.indexRelationship(relationMetadata, graphDb, relationship, metamodel);
                            } else {
                                indexer.updateRelationshipIndex(relationMetadata, graphDb, relationship,
                                        metamodel);
                            }
                        }
                    }
                }
            }
            // After node creation, manually index this node, if desired
            if (!isUpdate) {
                indexer.indexNode(entityMetadata, graphDb, node, metamodel);
            } else {
                indexer.updateNodeIndex(entityMetadata, graphDb, node, metamodel);
            }
        }
    } catch (Exception e) {
        log.error("Error while persisting entity " + entity + ", Caused by: ", e);
        throw new PersistenceException(e);
    }
}
From source file:icom.jpa.bdk.dao.EntityDAO.java
public Entity concludeCreateObject(ManagedIdentifiableProxy obj, DAOContext context, Projection proj) { EntityCreator creator = (EntityCreator) context.getCreator(); try {//www. java 2 s . c o m BdkUserContextImpl userContext = (BdkUserContextImpl) obj.getPersistenceContext().getUserContext(); BeeId id = getBeeId(obj.getObjectId().toString()); String resourceType = id.getResourceType(); String params = getCreateObjectParameters(obj, context); PostMethod postMethod = preparePostMethod(resourceType, userContext.antiCSRF, proj, params); Entity bdkEntity = (Entity) bdkHttpUtil.execute(getBdkClass(obj), postMethod, creator, userContext.httpClient); String objectId = bdkEntity.getCollabId().getId(); obj.setObjectId(objectId); assignObjectId(obj.getPojoIdentifiable(), objectId); if (proj != Projection.EMPTY) { String changeToken = bdkEntity.getSnapshotId(); assignChangeToken(obj.getPojoIdentifiable(), changeToken); } // re-cache the object with the server assigned id // the server may assign a new object id rather than use the client assigned id obj.getPersistenceContext().recacheIdentifiableDependent(obj); return bdkEntity; } catch (Exception ex) { throw new PersistenceException(ex); } }
From source file:org.appverse.web.framework.backend.persistence.services.integration.impl.live.JPAPersistenceService.java
private void checkMaxFilterColumnsToSortDeep(final IntegrationDataFilter filter) { // Get maximum deep in "columnsToSort" int columnsDeep = 0; for (String columnPath : filter.getColumnsToSort()) { columnsDeep = Math.max(StringUtils.countOccurrencesOf(columnPath, "."), columnsDeep); }/*from ww w. j a v a 2 s . c o m*/ if (columnsDeep > 0) { StringBuffer e = new StringBuffer(); e.append(PersistenceMessageBundle.MSG_DAO_INVALID_FILTER_ORDERING_COLUMNS) .append(getClassP().getSimpleName()).append(".").append(filter.toString()).append(".") .append(PersistenceMessageBundle.MSG_DAO_INVALID_FILTER_ADVIDE); logger.error(e.toString()); throw new PersistenceException(e.toString()); } }
From source file:icom.jpa.bdk.dao.EntityDAO.java
/**
 * Issues an HTTP DELETE for the server resource backing this managed object.
 *
 * @throws PersistenceException wrapping any failure during the HTTP exchange
 */
public void delete(ManagedIdentifiableProxy obj) {
    try {
        BdkUserContextImpl userContext = (BdkUserContextImpl) obj.getPersistenceContext().getUserContext();
        BeeId beeId = getBeeId(obj.getObjectId().toString());
        DeleteMethod deleteMethod = prepareDeleteMethod(beeId.getResourceType(), beeId.getId(),
                userContext.antiCSRF);
        bdkHttpUtil.execute(getBdkClass(obj), deleteMethod, userContext.httpClient);
    } catch (Exception ex) {
        // Boundary: wrap any transport failure in the JPA exception type.
        throw new PersistenceException(ex);
    }
}
From source file:com.abiquo.server.core.infrastructure.MachineDAO.java
private void whyNotCandidateMachines(final Integer idRack, final Integer idVirtualDatacenter, final Long hdRequiredOnDatastore, final Enterprise enterprise, final List<Integer> reserveds) throws PersistenceException { if (reserveds != null) { StringBuilder reservedMachinesB = new StringBuilder( String.format("Enterprise %s has the following machine reservations : ", enterprise.getName())); for (Integer mid : reserveds) { reservedMachinesB.append(mid + ' '); }// w ww .j a v a2 s .c o m /** * rack and hypervisor type */ Query query1 = getSession().createQuery(WHY_QUERY_CANDIDATE_SAME_VDC_RACK_AND_TYPE_AND_RESERVED); query1.setInteger("idVirtualDataCenter", idVirtualDatacenter); query1.setInteger("idRack", idRack); query1.setParameterList("reserveds", reserveds); List<Integer> query1res = query1.list(); if (query1res.size() == 0) { throw new PersistenceException(String.format( "%s\nThere isn't any machine on the required rack [%d] and virtual datacenter [%d]. " + "Please check the racks and hypervisor technology on the infrastructure.", reservedMachinesB.toString(), idRack, idVirtualDatacenter)); } /** * rack, hypervisor type and managed state */ Query query2 = getSession().createQuery(QUERY_CANDIDATE_MACHINES_RESERVED); query2.setInteger("idVirtualDataCenter", idVirtualDatacenter); query2.setInteger("idRack", idRack); query2.setParameter("state", MachineState.MANAGED); query2.setParameterList("reserveds", reserveds); List<Integer> query2res = query2.list(); if (query2res.size() == 0) { throw new PersistenceException(String.format( "%s\nThere isn't any MANAGED machine on the required rack [%d] and virtual datacenter [%d]. " + "Please check the machine health on the infrastructure.", reservedMachinesB.toString(), idRack, idVirtualDatacenter)); } /** * rack, hypervisor type, managed state, enterprise reservation and datastore capacity. 
*/ throw new PersistenceException( String.format("%s\nThere isn't any machine with the required datastore capacity [%d]", reservedMachinesB.toString(), hdRequiredOnDatastore)); } // reserved machines else { /** * rack and hypervisor type */ Query query1 = getSession().createQuery(WHY_QUERY_CANDIDATE_SAME_VDC_RACK_AND_TYPE); query1.setInteger("idVirtualDataCenter", idVirtualDatacenter); query1.setInteger("idRack", idRack); List<Integer> query1res = query1.list(); if (query1res.size() == 0) { throw new PersistenceException(String.format( "There isn't any machine on the required rack [%d] and virtual datacenter [%d]. " + "Please check the racks and hypervisor technology on the infrastructure.", idRack, idVirtualDatacenter)); } /** * rack, hypervisor type and managed state */ Query query2 = getSession().createQuery(WHT_QUERY_CANDIDATE_SAME_VDC_RACK_AND_TYPE_AND_STATE); query2.setInteger("idVirtualDataCenter", idVirtualDatacenter); query2.setInteger("idRack", idRack); query2.setParameter("state", MachineState.MANAGED); List<Integer> query2res = query2.list(); if (query2res.size() == 0) { throw new PersistenceException(String.format( "There isn't any MANAGED machine on the required rack [%d] and virtual datacenter [%d]. " + "Please check the machine health on the infrastructure.", idRack, idVirtualDatacenter)); } /** * rack, hypervisor type, managed state and enterprise reservation */ Query query3 = getSession().createQuery(QUERY_CANDIDATE_MACHINES); query3.setInteger("idVirtualDataCenter", idVirtualDatacenter); query3.setInteger("idRack", idRack); query3.setParameter("state", MachineState.MANAGED); query3.setParameter("enterpriseId", enterprise.getId()); List<Integer> query3res = query3.list(); if (query3res.size() == 0) { throw new PersistenceException(String.format( "There isn't any MANAGED machine on the required rack [%d] and virtual datacenter [%d] available for the current enterpirse [%s]. 
" + "Pleas check the machine reservation policies.", idRack, idVirtualDatacenter, enterprise.getName())); } /** * rack, hypervisor type, managed state, enterprise reservation and datastore capacity. */ throw new PersistenceException(String.format( "There isn't any machine with the required datastore capacity [%d]", hdRequiredOnDatastore)); } }
From source file:com.impetus.client.neo4j.Neo4JClient.java
@Override public int executeBatch() { if (batchSize > 0) { boolean nodeAutoIndexingEnabled = indexer.isNodeAutoIndexingEnabled(factory.getConnection()); boolean relationshipAutoIndexingEnabled = indexer .isRelationshipAutoIndexingEnabled(factory.getConnection()); BatchInserter inserter = getBatchInserter(); BatchInserterIndexProvider indexProvider = new LuceneBatchInserterIndexProvider(inserter); if (inserter == null) { log.error("Unable to create instance of BatchInserter. Opertion will fail"); throw new PersistenceException("Unable to create instance of BatchInserter. Opertion will fail"); }/* www . ja v a 2 s.c o m*/ if (resource != null && resource.isActive()) { log.error("Batch Insertion MUST not be executed in a transaction"); throw new PersistenceException("Batch Insertion MUST not be executed in a transaction"); } Map<Object, Long> pkToNodeIdMap = new HashMap<Object, Long>(); for (com.impetus.kundera.graph.Node graphNode : nodes) { if (graphNode.isDirty()) { graphNode.handlePreEvent(); // Delete can not be executed in batch, deleting normally if (graphNode.isInState(RemovedState.class)) { delete(graphNode.getData(), graphNode.getEntityId()); } else if (graphNode.isUpdate()) { // Neo4J allows only batch insertion, follow usual path // for normal updates persist(graphNode); } else { // Insert node Object entity = graphNode.getData(); EntityMetadata m = KunderaMetadataManager.getEntityMetadata(kunderaMetadata, entity.getClass()); Object pk = PropertyAccessorHelper.getId(entity, m); Map<String, Object> nodeProperties = mapper.createNodeProperties(entity, m); long nodeId = inserter.createNode(nodeProperties); pkToNodeIdMap.put(pk, nodeId); // Index Node indexer.indexNodeUsingBatchIndexer(indexProvider, m, nodeId, nodeProperties, nodeAutoIndexingEnabled); // Insert relationships for this particular node if (!getRelationHolders(graphNode).isEmpty()) { for (RelationHolder rh : getRelationHolders(graphNode)) { // Search Node (to be connected to ) in Neo4J // graph 
EntityMetadata targetNodeMetadata = KunderaMetadataManager .getEntityMetadata(kunderaMetadata, rh.getRelationValue().getClass()); Object targetNodeKey = PropertyAccessorHelper.getId(rh.getRelationValue(), targetNodeMetadata); Long targetNodeId = pkToNodeIdMap.get(targetNodeKey); if (targetNodeId != null) { /** * Join this node (source node) to target * node via relationship */ // Relationship Type DynamicRelationshipType relType = DynamicRelationshipType .withName(rh.getRelationName()); // Relationship Properties Map<String, Object> relationshipProperties = null; Object relationshipObj = rh.getRelationVia(); if (relationshipObj != null) { EntityMetadata relationMetadata = KunderaMetadataManager .getEntityMetadata(kunderaMetadata, relationshipObj.getClass()); relationshipProperties = mapper.createRelationshipProperties(m, targetNodeMetadata, relationshipObj); // Finally insert relationship long relationshipId = inserter.createRelationship(nodeId, targetNodeId, relType, relationshipProperties); // Index this relationship indexer.indexRelationshipUsingBatchIndexer(indexProvider, relationMetadata, relationshipId, relationshipProperties, relationshipAutoIndexingEnabled); } } } } } graphNode.handlePostEvent(); } } // Shutdown Batch inserter indexProvider.shutdown(); inserter.shutdown(); // Restore Graph Database service factory.setConnection((GraphDatabaseService) factory.createPoolOrConnection()); return pkToNodeIdMap.size(); } else { return 0; } }
From source file:com.impetus.client.oraclenosql.OracleNoSQLClient.java
@Override public <E> List<E> findAll(Class<E> entityClass, String[] columnsToSelect, Object... keys) { List<E> results = new ArrayList<E>(); if (columnsToSelect == null) { columnsToSelect = new String[0]; }//w w w. j ava 2 s.co m if (keys == null) { EntityMetadata entityMetadata = KunderaMetadataManager.getEntityMetadata(kunderaMetadata, entityClass); MetamodelImpl metamodel = (MetamodelImpl) KunderaMetadataManager.getMetamodel(kunderaMetadata, entityMetadata.getPersistenceUnit()); EntityType entityType = metamodel.entity(entityMetadata.getEntityClazz()); Table schemaTable = tableAPI.getTable(entityMetadata.getTableName()); // KunderaCoreUtils.showQuery("Get all records for " + // entityMetadata.getTableName(), showQuery); Iterator<Row> rowsIter = tableAPI.tableIterator(schemaTable.createPrimaryKey(), null, null); Map<String, Object> relationMap = initialize(entityMetadata); try { results = scrollAndPopulate(null, entityMetadata, metamodel, schemaTable, rowsIter, relationMap, Arrays.asList(columnsToSelect)); } catch (Exception e) { log.error("Error while finding records , Caused By :" + e + "."); throw new PersistenceException(e); } } else { for (Object key : keys) { results.add((E) find(entityClass, key, Arrays.asList(columnsToSelect))); } } return results; }
From source file:org.appverse.web.framework.backend.persistence.services.integration.impl.live.JPAPersistenceService.java
private void checkMaxFilterConditionsColumnsDeep(final IntegrationDataFilter filter) { int columnsDeep = 0; // Get maximum deep in "columns" if (filter.getColumns() != null) { for (String columnPath : filter.getColumns()) { columnsDeep = Math.max(StringUtils.countOccurrencesOf(columnPath, "."), columnsDeep); }/*from www . j av a 2 s. c o m*/ } // Get maximum deep in "columnsIsNull" if (filter.getColumnsIsNull() != null) { for (String columnPath : filter.getColumnsIsNull()) { columnsDeep = Math.max(StringUtils.countOccurrencesOf(columnPath, "."), columnsDeep); } } if (columnsDeep > 1) { StringBuffer e = new StringBuffer(); e.append(PersistenceMessageBundle.MSG_DAO_INVALID_FILTER_CONDITIONS_COLUMNS) .append(getClassP().getSimpleName()).append(".").append(filter.toString()).append(".") .append(PersistenceMessageBundle.MSG_DAO_INVALID_FILTER_ADVIDE); logger.error(e.toString()); throw new PersistenceException(e.toString()); } }