CFAstRamHostNodeTable.java: in-memory RAM DbIO implementation for HostNode (CF Asterisk 11 Configuration Model, manufactured by MSS Code Factory)
// Description: Java7 in-memory RAM DbIO implementation for HostNode.

/*
 *  CF Asterisk 11 Configuration Model
 *
 *  Copyright (c) 2013-2014 Mark Sobkow
 *
 *  This program is available as free software under the GNU GPL v3, or
 *  under a commercial license from Mark Sobkow.  For commercial licensing
 *  details, please contact msobkow@sasktel.net.
 *
 *  Under the terms of the GPL:
 *
 *      This program is free software: you can redistribute it and/or modify
 *      it under the terms of the GNU General Public License as published by
 *      the Free Software Foundation, either version 3 of the License, or
 *      (at your option) any later version.
 *
 *      This program is distributed in the hope that it will be useful,
 *      but WITHOUT ANY WARRANTY; without even the implied warranty of
 *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *      GNU General Public License for more details.
 *
 *      You should have received a copy of the GNU General Public License
 *      along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  This source code incorporates modified modules originally licensed
 *  under the Apache 2.0 license by MSS Code Factory including CFSecurity
 *  (net-sourceforge-msscodefactory-2.0-cfsecurity.xml),
 *  CFInternet (net-sourceforge-msscodefactory-2.0-cfinternet.xml), and
 *  CFCrm 2.0 (net-sourceforge-msscodefactory-2.0-cfcrm.xml), with all of the
 *  required models being available as part of the MSS Code Factory 1.11
 *  distribution source and install zips.
 *
 *  You can download installations of MSS Code Factory 1.11 from
 *  http://msscodefactory.sourceforge.net/
 *
 * ***********************************************************************
 *
 *  Code manufactured by MSS Code Factory
 */

package net.sourceforge.msscodefactory.cfasterisk.v2_0.CFAstRam;

import java.sql.*;
import java.util.*;
import net.sourceforge.msscodefactory.cflib.v1_11.CFLib.*;
import org.apache.commons.codec.binary.Base64;
import net.sourceforge.msscodefactory.cfasterisk.v2_0.CFAst.*;
import net.sourceforge.msscodefactory.cfasterisk.v2_0.CFAstRam.*;

/*
 *  CFAstRamHostNodeTable in-memory RAM DbIO implementation
 *  for HostNode.
 */
public class CFAstRamHostNodeTable implements ICFAstHostNodeTable {

    private CFAstRamSchema schema;
    private Map<CFAstHostNodePKey, CFAstHostNodeBuff> dictByPKey =
            new HashMap<CFAstHostNodePKey, CFAstHostNodeBuff>();
    private SortedMap<CFAstHostNodeByClusterIdxKey, SortedMap<CFAstHostNodePKey, CFAstHostNodeBuff>> dictByClusterIdx =
            new TreeMap<CFAstHostNodeByClusterIdxKey, SortedMap<CFAstHostNodePKey, CFAstHostNodeBuff>>();
    private SortedMap<CFAstHostNodeByUDescrIdxKey, CFAstHostNodeBuff> dictByUDescrIdx =
            new TreeMap<CFAstHostNodeByUDescrIdxKey, CFAstHostNodeBuff>();
    private SortedMap<CFAstHostNodeByHostNameIdxKey, CFAstHostNodeBuff> dictByHostNameIdx =
            new TreeMap<CFAstHostNodeByHostNameIdxKey, CFAstHostNodeBuff>();

    public CFAstRamHostNodeTable(CFAstRamSchema argSchema) {
        schema = argSchema;
    }

    public void createHostNode(CFAstAuthorization Authorization, CFAstHostNodeBuff Buff) {
        CFAstHostNodePKey pkey = schema.getFactoryHostNode().newPKey();
        pkey.setRequiredClusterId(Buff.getRequiredClusterId());
        pkey.setRequiredHostNodeId(((CFAstRamClusterTable) schema.getTableCluster())
                .nextHostNodeIdGen(Authorization, Buff.getRequiredClusterId()));
        Buff.setRequiredClusterId(pkey.getRequiredClusterId());
        Buff.setRequiredHostNodeId(pkey.getRequiredHostNodeId());
        CFAstHostNodeByClusterIdxKey keyClusterIdx = schema.getFactoryHostNode().newClusterIdxKey();
        keyClusterIdx.setRequiredClusterId(Buff.getRequiredClusterId());
        CFAstHostNodeByUDescrIdxKey keyUDescrIdx = schema.getFactoryHostNode().newUDescrIdxKey();
        keyUDescrIdx.setRequiredClusterId(Buff.getRequiredClusterId());
        keyUDescrIdx.setRequiredDescription(Buff.getRequiredDescription());
        CFAstHostNodeByHostNameIdxKey keyHostNameIdx = schema.getFactoryHostNode().newHostNameIdxKey();
        keyHostNameIdx.setRequiredClusterId(Buff.getRequiredClusterId());
        keyHostNameIdx.setRequiredHostName(Buff.getRequiredHostName());

        // Validate unique indexes
        if (dictByPKey.containsKey(pkey)) {
            throw CFLib.getDefaultExceptionFactory().newPrimaryKeyNotNewException(getClass(),
                    "createHostNode",
                    pkey);
        }
        if (dictByUDescrIdx.containsKey(keyUDescrIdx)) {
            throw CFLib.getDefaultExceptionFactory().newUniqueIndexViolationException(getClass(),
                    "createHostNode",
                    "HostNodeUDescrIdx",
                    keyUDescrIdx);
        }
        if (dictByHostNameIdx.containsKey(keyHostNameIdx)) {
            throw CFLib.getDefaultExceptionFactory().newUniqueIndexViolationException(getClass(),
                    "createHostNode",
                    "HostNodeUHostNameIdx",
                    keyHostNameIdx);
        }

        // Validate foreign keys
        {
            boolean allNull = true;
            allNull = false;
            if (!allNull) {
                if (null == schema.getTableCluster().readDerivedByIdIdx(Authorization,
                        Buff.getRequiredClusterId())) {
                    throw CFLib.getDefaultExceptionFactory().newUnresolvedRelationException(getClass(),
                            "createHostNode",
                            "Container",
                            "HostNodeCluster",
                            "Cluster",
                            null);
                }
            }
        }

        // Proceed with adding the new record
        dictByPKey.put(pkey, Buff);
        SortedMap<CFAstHostNodePKey, CFAstHostNodeBuff> subdictClusterIdx;
        if (dictByClusterIdx.containsKey(keyClusterIdx)) {
            subdictClusterIdx = dictByClusterIdx.get(keyClusterIdx);
        } else {
            subdictClusterIdx = new TreeMap<CFAstHostNodePKey, CFAstHostNodeBuff>();
            dictByClusterIdx.put(keyClusterIdx, subdictClusterIdx);
        }
        subdictClusterIdx.put(pkey, Buff);
        dictByUDescrIdx.put(keyUDescrIdx, Buff);
        dictByHostNameIdx.put(keyHostNameIdx, Buff);
    }

    public CFAstHostNodeBuff readDerived(CFAstAuthorization Authorization, CFAstHostNodePKey PKey) {
        final String S_ProcName = "CFAstRamHostNode.readDerived() ";
        CFAstHostNodePKey key = schema.getFactoryHostNode().newPKey();
        key.setRequiredClusterId(PKey.getRequiredClusterId());
        key.setRequiredHostNodeId(PKey.getRequiredHostNodeId());
        CFAstHostNodeBuff buff;
        if (dictByPKey.containsKey(key)) {
            buff = dictByPKey.get(key);
        } else {
            buff = null;
        }
        return (buff);
    }

    public CFAstHostNodeBuff lockDerived(CFAstAuthorization Authorization, CFAstHostNodePKey PKey) {
        final String S_ProcName = "CFAstRamHostNode.lockDerived() ";
        CFAstHostNodePKey key = schema.getFactoryHostNode().newPKey();
        key.setRequiredClusterId(PKey.getRequiredClusterId());
        key.setRequiredHostNodeId(PKey.getRequiredHostNodeId());
        CFAstHostNodeBuff buff;
        if (dictByPKey.containsKey(key)) {
            buff = dictByPKey.get(key);
        } else {
            buff = null;
        }
        return (buff);
    }

    public CFAstHostNodeBuff[] readAllDerived(CFAstAuthorization Authorization) {
        final String S_ProcName = "CFAstRamHostNode.readAllDerived() ";
        CFAstHostNodeBuff[] retList = new CFAstHostNodeBuff[dictByPKey.values().size()];
        Iterator<CFAstHostNodeBuff> iter = dictByPKey.values().iterator();
        int idx = 0;
        while (iter.hasNext()) {
            retList[idx++] = iter.next();
        }
        return (retList);
    }

    public CFAstHostNodeBuff[] readDerivedByClusterIdx(CFAstAuthorization Authorization, long ClusterId) {
        final String S_ProcName = "CFAstRamHostNode.readDerivedByClusterIdx() ";
        CFAstHostNodeByClusterIdxKey key = schema.getFactoryHostNode().newClusterIdxKey();
        key.setRequiredClusterId(ClusterId);
        CFAstHostNodeBuff[] recArray;
        if (dictByClusterIdx.containsKey(key)) {
            SortedMap<CFAstHostNodePKey, CFAstHostNodeBuff> subdictClusterIdx = dictByClusterIdx.get(key);
            recArray = new CFAstHostNodeBuff[subdictClusterIdx.size()];
            Iterator<CFAstHostNodeBuff> iter = subdictClusterIdx.values().iterator();
            int idx = 0;
            while (iter.hasNext()) {
                recArray[idx++] = iter.next();
            }
        } else {
            recArray = new CFAstHostNodeBuff[0];
        }
        return (recArray);
    }

    public CFAstHostNodeBuff readDerivedByUDescrIdx(CFAstAuthorization Authorization, long ClusterId, String Description) {
        final String S_ProcName = "CFAstRamHostNode.readDerivedByUDescrIdx() ";
        CFAstHostNodeByUDescrIdxKey key = schema.getFactoryHostNode().newUDescrIdxKey();
        key.setRequiredClusterId(ClusterId);
        key.setRequiredDescription(Description);
        CFAstHostNodeBuff buff;
        if (dictByUDescrIdx.containsKey(key)) {
            buff = dictByUDescrIdx.get(key);
        } else {
            buff = null;
        }
        return (buff);
    }

    public CFAstHostNodeBuff readDerivedByHostNameIdx(CFAstAuthorization Authorization, long ClusterId, String HostName) {
        final String S_ProcName = "CFAstRamHostNode.readDerivedByHostNameIdx() ";
        CFAstHostNodeByHostNameIdxKey key = schema.getFactoryHostNode().newHostNameIdxKey();
        key.setRequiredClusterId(ClusterId);
        key.setRequiredHostName(HostName);
        CFAstHostNodeBuff buff;
        if (dictByHostNameIdx.containsKey(key)) {
            buff = dictByHostNameIdx.get(key);
        } else {
            buff = null;
        }
        return (buff);
    }

    public CFAstHostNodeBuff readDerivedByIdIdx(CFAstAuthorization Authorization, long ClusterId, long HostNodeId) {
        final String S_ProcName = "CFAstRamHostNode.readDerivedByIdIdx() ";
        CFAstHostNodePKey key = schema.getFactoryHostNode().newPKey();
        key.setRequiredClusterId(ClusterId);
        key.setRequiredHostNodeId(HostNodeId);
        CFAstHostNodeBuff buff;
        if (dictByPKey.containsKey(key)) {
            buff = dictByPKey.get(key);
        } else {
            buff = null;
        }
        return (buff);
    }

    public CFAstHostNodeBuff readBuff(CFAstAuthorization Authorization, CFAstHostNodePKey PKey) {
        final String S_ProcName = "CFAstRamHostNode.readBuff() ";
        CFAstHostNodeBuff buff = readDerived(Authorization, PKey);
        if ((buff != null) && (!buff.getClassCode().equals("HSND"))) {
            buff = null;
        }
        return (buff);
    }
    public CFAstHostNodeBuff lockBuff(CFAstAuthorization Authorization, CFAstHostNodePKey PKey) {
        final String S_ProcName = "CFAstRamHostNode.lockBuff() ";
        CFAstHostNodeBuff buff = readDerived(Authorization, PKey);
        if ((buff != null) && (!buff.getClassCode().equals("HSND"))) {
            buff = null;
        }
        return (buff);
    }

    public CFAstHostNodeBuff[] readAllBuff(CFAstAuthorization Authorization) {
        final String S_ProcName = "CFAstRamHostNode.readAllBuff() ";
        CFAstHostNodeBuff buff;
        ArrayList<CFAstHostNodeBuff> filteredList = new ArrayList<CFAstHostNodeBuff>();
        CFAstHostNodeBuff[] buffList = readAllDerived(Authorization);
        for (int idx = 0; idx < buffList.length; idx++) {
            buff = buffList[idx];
            if ((buff != null) && buff.getClassCode().equals("HSND")) {
                filteredList.add(buff);
            }
        }
        return (filteredList.toArray(new CFAstHostNodeBuff[0]));
    }

    public CFAstHostNodeBuff readBuffByIdIdx(CFAstAuthorization Authorization, long ClusterId, long HostNodeId) {
        final String S_ProcName = "CFAstRamHostNode.readBuffByIdIdx() ";
        CFAstHostNodeBuff buff = readDerivedByIdIdx(Authorization, ClusterId, HostNodeId);
        if ((buff != null) && buff.getClassCode().equals("HSND")) {
            return ((CFAstHostNodeBuff) buff);
        } else {
            return (null);
        }
    }

    public CFAstHostNodeBuff[] readBuffByClusterIdx(CFAstAuthorization Authorization, long ClusterId) {
        final String S_ProcName = "CFAstRamHostNode.readBuffByClusterIdx() ";
        CFAstHostNodeBuff buff;
        ArrayList<CFAstHostNodeBuff> filteredList = new ArrayList<CFAstHostNodeBuff>();
        CFAstHostNodeBuff[] buffList = readDerivedByClusterIdx(Authorization, ClusterId);
        for (int idx = 0; idx < buffList.length; idx++) {
            buff = buffList[idx];
            if ((buff != null) && buff.getClassCode().equals("HSND")) {
                filteredList.add((CFAstHostNodeBuff) buff);
            }
        }
        return (filteredList.toArray(new CFAstHostNodeBuff[0]));
    }

    public CFAstHostNodeBuff readBuffByUDescrIdx(CFAstAuthorization Authorization, long ClusterId, String Description) {
        final String S_ProcName = "CFAstRamHostNode.readBuffByUDescrIdx() ";
        CFAstHostNodeBuff buff = readDerivedByUDescrIdx(Authorization, ClusterId, Description);
        if ((buff != null) && buff.getClassCode().equals("HSND")) {
            return ((CFAstHostNodeBuff) buff);
        } else {
            return (null);
        }
    }

    public CFAstHostNodeBuff readBuffByHostNameIdx(CFAstAuthorization Authorization, long ClusterId, String HostName) {
        final String S_ProcName = "CFAstRamHostNode.readBuffByHostNameIdx() ";
        CFAstHostNodeBuff buff = readDerivedByHostNameIdx(Authorization, ClusterId, HostName);
        if ((buff != null) && buff.getClassCode().equals("HSND")) {
            return ((CFAstHostNodeBuff) buff);
        } else {
            return (null);
        }
    }

    public void updateHostNode(CFAstAuthorization Authorization, CFAstHostNodeBuff Buff) {
        CFAstHostNodePKey pkey = schema.getFactoryHostNode().newPKey();
        pkey.setRequiredClusterId(Buff.getRequiredClusterId());
        pkey.setRequiredHostNodeId(Buff.getRequiredHostNodeId());
        CFAstHostNodeBuff existing = dictByPKey.get(pkey);
        if (existing == null) {
            throw CFLib.getDefaultExceptionFactory().newStaleCacheDetectedException(getClass(),
                    "updateHostNode",
                    "Existing record not found",
                    "HostNode",
                    pkey);
        }
        if (existing.getRequiredRevision() != Buff.getRequiredRevision()) {
            throw CFLib.getDefaultExceptionFactory().newCollisionDetectedException(getClass(),
                    "updateHostNode",
                    pkey);
        }
        Buff.setRequiredRevision(Buff.getRequiredRevision() + 1);
        CFAstHostNodeByClusterIdxKey existingKeyClusterIdx = schema.getFactoryHostNode().newClusterIdxKey();
        existingKeyClusterIdx.setRequiredClusterId(existing.getRequiredClusterId());
        CFAstHostNodeByClusterIdxKey newKeyClusterIdx =
                schema.getFactoryHostNode().newClusterIdxKey();
        newKeyClusterIdx.setRequiredClusterId(Buff.getRequiredClusterId());
        CFAstHostNodeByUDescrIdxKey existingKeyUDescrIdx = schema.getFactoryHostNode().newUDescrIdxKey();
        existingKeyUDescrIdx.setRequiredClusterId(existing.getRequiredClusterId());
        existingKeyUDescrIdx.setRequiredDescription(existing.getRequiredDescription());
        CFAstHostNodeByUDescrIdxKey newKeyUDescrIdx = schema.getFactoryHostNode().newUDescrIdxKey();
        newKeyUDescrIdx.setRequiredClusterId(Buff.getRequiredClusterId());
        newKeyUDescrIdx.setRequiredDescription(Buff.getRequiredDescription());
        CFAstHostNodeByHostNameIdxKey existingKeyHostNameIdx = schema.getFactoryHostNode().newHostNameIdxKey();
        existingKeyHostNameIdx.setRequiredClusterId(existing.getRequiredClusterId());
        existingKeyHostNameIdx.setRequiredHostName(existing.getRequiredHostName());
        CFAstHostNodeByHostNameIdxKey newKeyHostNameIdx = schema.getFactoryHostNode().newHostNameIdxKey();
        newKeyHostNameIdx.setRequiredClusterId(Buff.getRequiredClusterId());
        newKeyHostNameIdx.setRequiredHostName(Buff.getRequiredHostName());

        // Check unique indexes
        if (!existingKeyUDescrIdx.equals(newKeyUDescrIdx)) {
            if (dictByUDescrIdx.containsKey(newKeyUDescrIdx)) {
                throw CFLib.getDefaultExceptionFactory().newUniqueIndexViolationException(getClass(),
                        "updateHostNode",
                        "HostNodeUDescrIdx",
                        newKeyUDescrIdx);
            }
        }
        if (!existingKeyHostNameIdx.equals(newKeyHostNameIdx)) {
            if (dictByHostNameIdx.containsKey(newKeyHostNameIdx)) {
                throw CFLib.getDefaultExceptionFactory().newUniqueIndexViolationException(getClass(),
                        "updateHostNode",
                        "HostNodeUHostNameIdx",
                        newKeyHostNameIdx);
            }
        }

        // Validate foreign keys
        {
            boolean allNull = true;
            if (allNull) {
                if (null == schema.getTableCluster().readDerivedByIdIdx(Authorization,
                        Buff.getRequiredClusterId())) {
                    throw CFLib.getDefaultExceptionFactory().newUnresolvedRelationException(getClass(),
                            "updateHostNode",
                            "Container",
                            "HostNodeCluster",
                            "Cluster",
                            null);
                }
            }
        }

        // Update is valid
        SortedMap<CFAstHostNodePKey, CFAstHostNodeBuff> subdict;
        dictByPKey.remove(pkey);
        dictByPKey.put(pkey, Buff);
        subdict = dictByClusterIdx.get(existingKeyClusterIdx);
        if (subdict != null) {
            subdict.remove(pkey);
        }
        if (dictByClusterIdx.containsKey(newKeyClusterIdx)) {
            subdict = dictByClusterIdx.get(newKeyClusterIdx);
        } else {
            subdict = new TreeMap<CFAstHostNodePKey, CFAstHostNodeBuff>();
            dictByClusterIdx.put(newKeyClusterIdx, subdict);
        }
        subdict.put(pkey, Buff);
        dictByUDescrIdx.remove(existingKeyUDescrIdx);
        dictByUDescrIdx.put(newKeyUDescrIdx, Buff);
        dictByHostNameIdx.remove(existingKeyHostNameIdx);
        dictByHostNameIdx.put(newKeyHostNameIdx, Buff);
    }

    public void deleteHostNode(CFAstAuthorization Authorization, CFAstHostNodeBuff Buff) {
        final String S_ProcName = "CFAstRamHostNodeTable.deleteHostNode() ";
        CFAstHostNodePKey pkey = schema.getFactoryHostNode().newPKey();
        pkey.setRequiredClusterId(Buff.getRequiredClusterId());
        pkey.setRequiredHostNodeId(Buff.getRequiredHostNodeId());
        CFAstHostNodeBuff existing = dictByPKey.get(pkey);
        if (existing == null) {
            return;
        }
        if (existing.getRequiredRevision() != Buff.getRequiredRevision()) {
            throw CFLib.getDefaultExceptionFactory().newCollisionDetectedException(getClass(),
                    "deleteHostNode",
                    pkey);
        }
        CFAstHostNodeByClusterIdxKey keyClusterIdx = schema.getFactoryHostNode().newClusterIdxKey();
        keyClusterIdx.setRequiredClusterId(existing.getRequiredClusterId());
        CFAstHostNodeByUDescrIdxKey keyUDescrIdx = schema.getFactoryHostNode().newUDescrIdxKey();
        keyUDescrIdx.setRequiredClusterId(existing.getRequiredClusterId());
        keyUDescrIdx.setRequiredDescription(existing.getRequiredDescription());
        CFAstHostNodeByHostNameIdxKey keyHostNameIdx = schema.getFactoryHostNode().newHostNameIdxKey();
        keyHostNameIdx.setRequiredClusterId(existing.getRequiredClusterId());
        keyHostNameIdx.setRequiredHostName(existing.getRequiredHostName());

        // Validate reverse foreign keys

        // Delete is valid
        schema.getTableService().deleteServiceByHostIdx(Authorization,
                Buff.getRequiredClusterId(),
                Buff.getRequiredHostNodeId());
        schema.getTableConfigurationFile().deleteConfigurationFileByHostIdx(Authorization,
                Buff.getRequiredClusterId(),
                Buff.getRequiredHostNodeId());
        SortedMap<CFAstHostNodePKey, CFAstHostNodeBuff> subdict;
        dictByPKey.remove(pkey);
        subdict = dictByClusterIdx.get(keyClusterIdx);
        subdict.remove(pkey);
        dictByUDescrIdx.remove(keyUDescrIdx);
        dictByHostNameIdx.remove(keyHostNameIdx);
    }

    public void deleteHostNodeByIdIdx(CFAstAuthorization Authorization, long argClusterId, long argHostNodeId) {
        CFAstHostNodePKey key = schema.getFactoryHostNode().newPKey();
        key.setRequiredClusterId(argClusterId);
        key.setRequiredHostNodeId(argHostNodeId);
        deleteHostNodeByIdIdx(Authorization, key);
    }

    public void deleteHostNodeByIdIdx(CFAstAuthorization Authorization, CFAstHostNodePKey argKey) {
        CFAstHostNodeBuff cur;
        LinkedList<CFAstHostNodeBuff> matchSet = new LinkedList<CFAstHostNodeBuff>();
        Iterator<CFAstHostNodeBuff> values = dictByPKey.values().iterator();
        while (values.hasNext()) {
            cur = values.next();
            if (argKey.equals(cur)) {
                matchSet.add(cur);
            }
        }
        Iterator<CFAstHostNodeBuff> iterMatch = matchSet.iterator();
        while (iterMatch.hasNext()) {
            cur = iterMatch.next();
            deleteHostNode(Authorization, cur);
        }
    }

    public void deleteHostNodeByClusterIdx(CFAstAuthorization Authorization, long argClusterId) {
        CFAstHostNodeByClusterIdxKey key = schema.getFactoryHostNode().newClusterIdxKey();
        key.setRequiredClusterId(argClusterId);
        deleteHostNodeByClusterIdx(Authorization, key);
    }

    public void deleteHostNodeByClusterIdx(CFAstAuthorization Authorization, CFAstHostNodeByClusterIdxKey argKey) {
        CFAstHostNodeBuff cur;
        LinkedList<CFAstHostNodeBuff> matchSet = new LinkedList<CFAstHostNodeBuff>();
        Iterator<CFAstHostNodeBuff> values = dictByPKey.values().iterator();
        while (values.hasNext()) {
            cur = values.next();
            if (argKey.equals(cur)) {
                matchSet.add(cur);
            }
        }
        Iterator<CFAstHostNodeBuff> iterMatch = matchSet.iterator();
        while (iterMatch.hasNext()) {
            cur = iterMatch.next();
            deleteHostNode(Authorization, cur);
        }
    }

    public void deleteHostNodeByUDescrIdx(CFAstAuthorization Authorization, long argClusterId, String argDescription) {
        CFAstHostNodeByUDescrIdxKey key = schema.getFactoryHostNode().newUDescrIdxKey();
        key.setRequiredClusterId(argClusterId);
        key.setRequiredDescription(argDescription);
        deleteHostNodeByUDescrIdx(Authorization, key);
    }

    public void deleteHostNodeByUDescrIdx(CFAstAuthorization Authorization, CFAstHostNodeByUDescrIdxKey argKey) {
        CFAstHostNodeBuff cur;
        LinkedList<CFAstHostNodeBuff> matchSet = new LinkedList<CFAstHostNodeBuff>();
        Iterator<CFAstHostNodeBuff> values = dictByPKey.values().iterator();
        while (values.hasNext()) {
            cur = values.next();
            if (argKey.equals(cur)) {
                matchSet.add(cur);
            }
        }
        Iterator<CFAstHostNodeBuff> iterMatch = matchSet.iterator();
        while (iterMatch.hasNext()) {
            cur = iterMatch.next();
            deleteHostNode(Authorization, cur);
        }
    }

    public void deleteHostNodeByHostNameIdx(CFAstAuthorization Authorization, long argClusterId, String argHostName) {
        CFAstHostNodeByHostNameIdxKey key = schema.getFactoryHostNode().newHostNameIdxKey();
        key.setRequiredClusterId(argClusterId);
        key.setRequiredHostName(argHostName);
        deleteHostNodeByHostNameIdx(Authorization, key);
    }

    public void deleteHostNodeByHostNameIdx(CFAstAuthorization Authorization, CFAstHostNodeByHostNameIdxKey argKey) {
        CFAstHostNodeBuff cur;
        LinkedList<CFAstHostNodeBuff> matchSet = new LinkedList<CFAstHostNodeBuff>();
        Iterator<CFAstHostNodeBuff> values = dictByPKey.values().iterator();
        while (values.hasNext()) {
            cur = values.next();
            if (argKey.equals(cur)) {
                matchSet.add(cur);
            }
        }
        Iterator<CFAstHostNodeBuff> iterMatch = matchSet.iterator();
        while (iterMatch.hasNext()) {
            cur = iterMatch.next();
            deleteHostNode(Authorization, cur);
        }
    }

    public CFAstCursor openHostNodeCursorAll(CFAstAuthorization Authorization) {
        CFAstCursor cursor = new CFAstRamHostNodeCursor(Authorization, schema, dictByPKey.values());
        return (cursor);
    }

    public CFAstCursor openHostNodeCursorByClusterIdx(CFAstAuthorization Authorization, long ClusterId) {
        CFAstCursor cursor;
        CFAstHostNodeByClusterIdxKey key = schema.getFactoryHostNode().newClusterIdxKey();
        key.setRequiredClusterId(ClusterId);
        if (dictByClusterIdx.containsKey(key)) {
            SortedMap<CFAstHostNodePKey, CFAstHostNodeBuff> subdictClusterIdx = dictByClusterIdx.get(key);
            cursor = new CFAstRamHostNodeCursor(Authorization, schema, subdictClusterIdx.values());
        } else {
            cursor = new CFAstRamHostNodeCursor(Authorization, schema, new ArrayList<CFAstHostNodeBuff>());
        }
        return (cursor);
    }

    public void closeHostNodeCursor(CFAstCursor Cursor) {
        // Cursor.DataReader.Close();
    }

    public CFAstHostNodeBuff nextHostNodeCursor(CFAstCursor Cursor) {
        CFAstRamHostNodeCursor cursor = (CFAstRamHostNodeCursor) Cursor;
        CFAstHostNodeBuff rec = cursor.getCursor().next();
        cursor.setRowIdx(cursor.getRowIdx() + 1);
        return (rec);
    }

    public CFAstHostNodeBuff prevHostNodeCursor(CFAstCursor Cursor) {
        int targetRowIdx = (Cursor.getRowIdx() > 1) ? Cursor.getRowIdx() - 1 : 1;
        CFAstHostNodeBuff rec = null;
        if (Cursor.getRowIdx() >= targetRowIdx) {
            Cursor.reset();
        }
        while (Cursor.getRowIdx() < targetRowIdx) {
            rec = nextHostNodeCursor(Cursor);
        }
        return (rec);
    }

    public CFAstHostNodeBuff firstHostNodeCursor(CFAstCursor Cursor) {
        int targetRowIdx = 1;
        CFAstHostNodeBuff rec = null;
        Cursor.reset();
        while (Cursor.getRowIdx() < targetRowIdx) {
            rec = nextHostNodeCursor(Cursor);
        }
        return (rec);
    }

    public CFAstHostNodeBuff lastHostNodeCursor(CFAstCursor Cursor) {
        throw CFLib.getDefaultExceptionFactory().newNotImplementedYetException(getClass(),
                "lastHostNodeCursor");
    }

    public CFAstHostNodeBuff nthHostNodeCursor(CFAstCursor Cursor, int Idx) {
        int targetRowIdx = Idx;
        CFAstHostNodeBuff rec = null;
        if (Cursor.getRowIdx() >= targetRowIdx) {
            Cursor.reset();
        }
        while (Cursor.getRowIdx() < targetRowIdx) {
            rec = nextHostNodeCursor(Cursor);
        }
        return (rec);
    }

    public void releasePreparedStatements() {
    }
}
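
Usage note: the table enforces optimistic locking through the Required Revision attribute, so a caller re-reads a record, modifies it, and passes it back; updateHostNode() raises a collision exception when the revision no longer matches the cached record, and deleteHostNode() cascades to the Service and ConfigurationFile tables before removing the row from the in-memory maps. The sketch below illustrates that flow under stated assumptions: the CFAstRamSchema wiring, the getTableHostNode() accessor, the newBuff() factory method, and the setRequiredDescription()/setRequiredHostName() setters are not defined in this file and are assumed here purely for illustration.

package net.sourceforge.msscodefactory.cfasterisk.v2_0.CFAstRam;

import net.sourceforge.msscodefactory.cfasterisk.v2_0.CFAst.*;

// Hypothetical driver class; only the ICFAstHostNodeTable calls are defined by
// CFAstRamHostNodeTable above, the rest of the wiring is an assumption.
public class CFAstRamHostNodeUsageSketch {
    public static void demo(CFAstRamSchema schema, CFAstAuthorization auth) {
        ICFAstHostNodeTable hostNodes = schema.getTableHostNode();          // assumed schema accessor

        // Create: HostNodeId is assigned by the Cluster table's id generator.
        CFAstHostNodeBuff buff = schema.getFactoryHostNode().newBuff();     // assumed factory method
        buff.setRequiredClusterId(1L);
        buff.setRequiredDescription("Primary PBX host");                    // assumed setter
        buff.setRequiredHostName("pbx01.example.com");                      // assumed setter
        hostNodes.createHostNode(auth, buff);

        // Re-read through the unique host-name index, then update; the revision
        // carried by the re-read buffer is what updateHostNode() checks for collisions.
        CFAstHostNodeBuff current = hostNodes.readBuffByHostNameIdx(auth, 1L, "pbx01.example.com");
        current.setRequiredDescription("Primary PBX host, rack 3");         // assumed setter
        hostNodes.updateHostNode(auth, current);

        // Delete cascades to Service and ConfigurationFile rows for this host node.
        hostNodes.deleteHostNode(auth, current);
    }
}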