// Java tutorial
// Description: Java7 in-memory RAM DbIO implementation for HostNode.

/*
 * MSS Code Factory Accounting Business Application Model
 *
 * Copyright (c) 2014 Mark Sobkow
 *
 * This program is available as free software under the GNU GPL v3, or
 * under a commercial license from Mark Sobkow. For commercial licensing
 * details, please contact msobkow@sasktel.net.
 *
 * Under the terms of the GPL:
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This source code incorporates modified modules originally licensed
 * under the Apache 2.0 license by MSS Code Factory including CFSecurity
 * (net-sourceforge-msscodefactory-2.0-cfsecurity.xml),
 * CFInternet (net-sourceforge-msscodefactory-2.0-cfinternet.xml), and
 * CFCrm 2.0 (net-sourceforge-msscodefactory-2.0-cfcrm.xml), with all of the
 * required models being available as part of the MSS Code Factory 1.11
 * distribution source and install zips.
 *
 * You can download installations of MSS Code Factory 1.11 from
 * http://msscodefactory.sourceforge.net/
 *
 * ***********************************************************************
 *
 * Code manufactured by MSS Code Factory
 */

package net.sourceforge.msscodefactory.cfacc.v2_0.CFAccRam;

import java.sql.*;
import java.util.*;
import net.sourceforge.msscodefactory.cflib.v1_11.CFLib.*;
import org.apache.commons.codec.binary.Base64;
import net.sourceforge.msscodefactory.cfacc.v2_0.CFAcc.*;
import net.sourceforge.msscodefactory.cfacc.v2_0.CFAccRam.*;

/*
 * CFAccRamHostNodeTable in-memory RAM DbIO implementation
 * for HostNode.
 *
 * Maintains the primary-key dictionary plus three secondary indexes
 * (ClusterIdx, UDescrIdx, HostNameIdx) which must be kept mutually
 * consistent by every create/update/delete.  Not thread-safe; callers
 * are presumed to serialize access — TODO confirm against schema usage.
 */
public class CFAccRamHostNodeTable implements ICFAccHostNodeTable {

    private CFAccRamSchema schema;

    // Primary-key index: PKey -> record buffer.
    private Map<CFAccHostNodePKey, CFAccHostNodeBuff> dictByPKey =
            new HashMap<CFAccHostNodePKey, CFAccHostNodeBuff>();

    // Non-unique index by cluster: each cluster key maps to a sorted sub-map of its records.
    private SortedMap<CFAccHostNodeByClusterIdxKey, SortedMap<CFAccHostNodePKey, CFAccHostNodeBuff>> dictByClusterIdx =
            new TreeMap<CFAccHostNodeByClusterIdxKey, SortedMap<CFAccHostNodePKey, CFAccHostNodeBuff>>();

    // Unique index by (ClusterId, Description).
    private SortedMap<CFAccHostNodeByUDescrIdxKey, CFAccHostNodeBuff> dictByUDescrIdx =
            new TreeMap<CFAccHostNodeByUDescrIdxKey, CFAccHostNodeBuff>();

    // Unique index by (ClusterId, HostName).
    private SortedMap<CFAccHostNodeByHostNameIdxKey, CFAccHostNodeBuff> dictByHostNameIdx =
            new TreeMap<CFAccHostNodeByHostNameIdxKey, CFAccHostNodeBuff>();

    public CFAccRamHostNodeTable(CFAccRamSchema argSchema) {
        schema = argSchema;
    }

    /**
     * Create a new HostNode record.  Allocates the HostNodeId from the owning
     * Cluster's id generator, validates the unique indexes and the Cluster
     * foreign key, then inserts the buffer into all four dictionaries.
     * Mutates Buff in place with the allocated primary key values.
     *
     * @throws RuntimeException (via CFLib factory) on duplicate key,
     *         unique-index violation, or unresolved Cluster relation.
     */
    public void createHostNode(CFAccAuthorization Authorization, CFAccHostNodeBuff Buff) {
        CFAccHostNodePKey pkey = schema.getFactoryHostNode().newPKey();
        pkey.setRequiredClusterId(Buff.getRequiredClusterId());
        pkey.setRequiredHostNodeId(((CFAccRamClusterTable) schema.getTableCluster())
                .nextHostNodeIdGen(Authorization, Buff.getRequiredClusterId()));
        Buff.setRequiredClusterId(pkey.getRequiredClusterId());
        Buff.setRequiredHostNodeId(pkey.getRequiredHostNodeId());

        CFAccHostNodeByClusterIdxKey keyClusterIdx = schema.getFactoryHostNode().newClusterIdxKey();
        keyClusterIdx.setRequiredClusterId(Buff.getRequiredClusterId());

        CFAccHostNodeByUDescrIdxKey keyUDescrIdx = schema.getFactoryHostNode().newUDescrIdxKey();
        keyUDescrIdx.setRequiredClusterId(Buff.getRequiredClusterId());
        keyUDescrIdx.setRequiredDescription(Buff.getRequiredDescription());

        CFAccHostNodeByHostNameIdxKey keyHostNameIdx = schema.getFactoryHostNode().newHostNameIdxKey();
        keyHostNameIdx.setRequiredClusterId(Buff.getRequiredClusterId());
        keyHostNameIdx.setRequiredHostName(Buff.getRequiredHostName());

        // Validate unique indexes
        if (dictByPKey.containsKey(pkey)) {
            throw CFLib.getDefaultExceptionFactory().newPrimaryKeyNotNewException(getClass(),
                    "createHostNode", pkey);
        }
        if (dictByUDescrIdx.containsKey(keyUDescrIdx)) {
            throw CFLib.getDefaultExceptionFactory().newUniqueIndexViolationException(getClass(),
                    "createHostNode", "HostNodeUDescrIdx", keyUDescrIdx);
        }
        if (dictByHostNameIdx.containsKey(keyHostNameIdx)) {
            throw CFLib.getDefaultExceptionFactory().newUniqueIndexViolationException(getClass(),
                    "createHostNode", "HostNodeUHostNameIdx", keyHostNameIdx);
        }

        // Validate foreign keys.  (Original generated code used a dead-store
        // "allNull" flag that always evaluated to performing this check.)
        if (null == schema.getTableCluster().readDerivedByIdIdx(Authorization,
                Buff.getRequiredClusterId())) {
            throw CFLib.getDefaultExceptionFactory().newUnresolvedRelationException(getClass(),
                    "createHostNode", "Container", "HostNodeCluster", "Cluster", null);
        }

        // Proceed with adding the new record
        dictByPKey.put(pkey, Buff);

        SortedMap<CFAccHostNodePKey, CFAccHostNodeBuff> subdictClusterIdx;
        if (dictByClusterIdx.containsKey(keyClusterIdx)) {
            subdictClusterIdx = dictByClusterIdx.get(keyClusterIdx);
        } else {
            subdictClusterIdx = new TreeMap<CFAccHostNodePKey, CFAccHostNodeBuff>();
            dictByClusterIdx.put(keyClusterIdx, subdictClusterIdx);
        }
        subdictClusterIdx.put(pkey, Buff);

        dictByUDescrIdx.put(keyUDescrIdx, Buff);
        dictByHostNameIdx.put(keyHostNameIdx, Buff);
    }

    /**
     * Read a record by primary key; returns null when not found.
     */
    public CFAccHostNodeBuff readDerived(CFAccAuthorization Authorization,
            CFAccHostNodePKey PKey) {
        final String S_ProcName = "CFAccRamHostNode.readDerived() ";
        CFAccHostNodePKey key = schema.getFactoryHostNode().newPKey();
        key.setRequiredClusterId(PKey.getRequiredClusterId());
        key.setRequiredHostNodeId(PKey.getRequiredHostNodeId());
        CFAccHostNodeBuff buff;
        if (dictByPKey.containsKey(key)) {
            buff = dictByPKey.get(key);
        } else {
            buff = null;
        }
        return (buff);
    }

    /**
     * Lock-read a record by primary key; in the RAM implementation this is
     * equivalent to readDerived() (no real locking).  Returns null when not found.
     */
    public CFAccHostNodeBuff lockDerived(CFAccAuthorization Authorization,
            CFAccHostNodePKey PKey) {
        // Fix: S_ProcName previously said "readDerived()" (copy-paste).
        final String S_ProcName = "CFAccRamHostNode.lockDerived() ";
        CFAccHostNodePKey key = schema.getFactoryHostNode().newPKey();
        key.setRequiredClusterId(PKey.getRequiredClusterId());
        key.setRequiredHostNodeId(PKey.getRequiredHostNodeId());
        CFAccHostNodeBuff buff;
        if (dictByPKey.containsKey(key)) {
            buff = dictByPKey.get(key);
        } else {
            buff = null;
        }
        return (buff);
    }

    /**
     * Read all records as an array (iteration order of the backing HashMap).
     */
    public CFAccHostNodeBuff[] readAllDerived(CFAccAuthorization Authorization) {
        final String S_ProcName = "CFAccRamHostNode.readAllDerived() ";
        CFAccHostNodeBuff[] retList = new CFAccHostNodeBuff[dictByPKey.values().size()];
        Iterator<CFAccHostNodeBuff> iter = dictByPKey.values().iterator();
        int idx = 0;
        while (iter.hasNext()) {
            retList[idx++] = iter.next();
        }
        return (retList);
    }

    /**
     * Read all records belonging to a cluster; returns an empty array when
     * the cluster has no host nodes.
     */
    public CFAccHostNodeBuff[] readDerivedByClusterIdx(CFAccAuthorization Authorization,
            long ClusterId) {
        final String S_ProcName = "CFAccRamHostNode.readDerivedByClusterIdx() ";
        CFAccHostNodeByClusterIdxKey key = schema.getFactoryHostNode().newClusterIdxKey();
        key.setRequiredClusterId(ClusterId);
        CFAccHostNodeBuff[] recArray;
        if (dictByClusterIdx.containsKey(key)) {
            SortedMap<CFAccHostNodePKey, CFAccHostNodeBuff> subdictClusterIdx =
                    dictByClusterIdx.get(key);
            recArray = new CFAccHostNodeBuff[subdictClusterIdx.size()];
            Iterator<CFAccHostNodeBuff> iter = subdictClusterIdx.values().iterator();
            int idx = 0;
            while (iter.hasNext()) {
                recArray[idx++] = iter.next();
            }
        } else {
            recArray = new CFAccHostNodeBuff[0];
        }
        return (recArray);
    }

    /**
     * Read the unique record matching (ClusterId, Description); null if absent.
     */
    public CFAccHostNodeBuff readDerivedByUDescrIdx(CFAccAuthorization Authorization,
            long ClusterId, String Description) {
        final String S_ProcName = "CFAccRamHostNode.readDerivedByUDescrIdx() ";
        CFAccHostNodeByUDescrIdxKey key = schema.getFactoryHostNode().newUDescrIdxKey();
        key.setRequiredClusterId(ClusterId);
        key.setRequiredDescription(Description);
        CFAccHostNodeBuff buff;
        if (dictByUDescrIdx.containsKey(key)) {
            buff = dictByUDescrIdx.get(key);
        } else {
            buff = null;
        }
        return (buff);
    }

    /**
     * Read the unique record matching (ClusterId, HostName); null if absent.
     */
    public CFAccHostNodeBuff readDerivedByHostNameIdx(CFAccAuthorization Authorization,
            long ClusterId, String HostName) {
        final String S_ProcName = "CFAccRamHostNode.readDerivedByHostNameIdx() ";
        CFAccHostNodeByHostNameIdxKey key = schema.getFactoryHostNode().newHostNameIdxKey();
        key.setRequiredClusterId(ClusterId);
        key.setRequiredHostName(HostName);
        CFAccHostNodeBuff buff;
        if (dictByHostNameIdx.containsKey(key)) {
            buff = dictByHostNameIdx.get(key);
        } else {
            buff = null;
        }
        return (buff);
    }

    /**
     * Read a record by its id columns (equivalent to the primary key); null if absent.
     */
    public CFAccHostNodeBuff readDerivedByIdIdx(CFAccAuthorization Authorization,
            long ClusterId, long HostNodeId) {
        final String S_ProcName = "CFAccRamHostNode.readDerivedByIdIdx() ";
        CFAccHostNodePKey key = schema.getFactoryHostNode().newPKey();
        key.setRequiredClusterId(ClusterId);
        key.setRequiredHostNodeId(HostNodeId);
        CFAccHostNodeBuff buff;
        if (dictByPKey.containsKey(key)) {
            buff = dictByPKey.get(key);
        } else {
            buff = null;
        }
        return (buff);
    }

    /**
     * Read a record by primary key, filtered to the concrete HostNode
     * class code "HSND"; null when absent or of a different subclass.
     */
    public CFAccHostNodeBuff readBuff(CFAccAuthorization Authorization,
            CFAccHostNodePKey PKey) {
        final String S_ProcName = "CFAccRamHostNode.readBuff() ";
        CFAccHostNodeBuff buff = readDerived(Authorization, PKey);
        if ((buff != null) && (!buff.getClassCode().equals("HSND"))) {
            buff = null;
        }
        return (buff);
    }

    /**
     * Lock-read variant of readBuff(); identical behavior in the RAM implementation.
     */
    public CFAccHostNodeBuff lockBuff(CFAccAuthorization Authorization,
            CFAccHostNodePKey PKey) {
        // Fix: S_ProcName previously said "readBuff()" (copy-paste).
        final String S_ProcName = "CFAccRamHostNode.lockBuff() ";
        CFAccHostNodeBuff buff = readDerived(Authorization, PKey);
        if ((buff != null) && (!buff.getClassCode().equals("HSND"))) {
            buff = null;
        }
        return (buff);
    }

    /**
     * Read all records whose class code is "HSND".
     */
    public CFAccHostNodeBuff[] readAllBuff(CFAccAuthorization Authorization) {
        final String S_ProcName = "CFAccRamHostNode.readAllBuff() ";
        CFAccHostNodeBuff buff;
        ArrayList<CFAccHostNodeBuff> filteredList = new ArrayList<CFAccHostNodeBuff>();
        CFAccHostNodeBuff[] buffList = readAllDerived(Authorization);
        for (int idx = 0; idx < buffList.length; idx++) {
            buff = buffList[idx];
            if ((buff != null) && buff.getClassCode().equals("HSND")) {
                filteredList.add(buff);
            }
        }
        return (filteredList.toArray(new CFAccHostNodeBuff[0]));
    }

    /**
     * Read by id columns, filtered to class code "HSND"; null when absent.
     */
    public CFAccHostNodeBuff readBuffByIdIdx(CFAccAuthorization Authorization,
            long ClusterId, long HostNodeId) {
        final String S_ProcName = "CFAccRamHostNode.readBuffByIdIdx() ";
        CFAccHostNodeBuff buff = readDerivedByIdIdx(Authorization, ClusterId, HostNodeId);
        if ((buff != null) && buff.getClassCode().equals("HSND")) {
            return ((CFAccHostNodeBuff) buff);
        } else {
            return (null);
        }
    }

    /**
     * Read the cluster's records, filtered to class code "HSND".
     */
    public CFAccHostNodeBuff[] readBuffByClusterIdx(CFAccAuthorization Authorization,
            long ClusterId) {
        final String S_ProcName = "CFAccRamHostNode.readBuffByClusterIdx() ";
        CFAccHostNodeBuff buff;
        ArrayList<CFAccHostNodeBuff> filteredList = new ArrayList<CFAccHostNodeBuff>();
        CFAccHostNodeBuff[] buffList = readDerivedByClusterIdx(Authorization, ClusterId);
        for (int idx = 0; idx < buffList.length; idx++) {
            buff = buffList[idx];
            if ((buff != null) && buff.getClassCode().equals("HSND")) {
                filteredList.add((CFAccHostNodeBuff) buff);
            }
        }
        return (filteredList.toArray(new CFAccHostNodeBuff[0]));
    }

    /**
     * Read by (ClusterId, Description), filtered to class code "HSND".
     */
    public CFAccHostNodeBuff readBuffByUDescrIdx(CFAccAuthorization Authorization,
            long ClusterId, String Description) {
        final String S_ProcName = "CFAccRamHostNode.readBuffByUDescrIdx() ";
        CFAccHostNodeBuff buff = readDerivedByUDescrIdx(Authorization, ClusterId, Description);
        if ((buff != null) && buff.getClassCode().equals("HSND")) {
            return ((CFAccHostNodeBuff) buff);
        } else {
            return (null);
        }
    }

    /**
     * Read by (ClusterId, HostName), filtered to class code "HSND".
     */
    public CFAccHostNodeBuff readBuffByHostNameIdx(CFAccAuthorization Authorization,
            long ClusterId, String HostName) {
        final String S_ProcName = "CFAccRamHostNode.readBuffByHostNameIdx() ";
        CFAccHostNodeBuff buff = readDerivedByHostNameIdx(Authorization, ClusterId, HostName);
        if ((buff != null) && buff.getClassCode().equals("HSND")) {
            return ((CFAccHostNodeBuff) buff);
        } else {
            return (null);
        }
    }

    /**
     * Update an existing HostNode record with optimistic-revision checking,
     * re-validating unique indexes and the Cluster foreign key, then moving
     * the buffer between index entries as needed.  Increments Buff's revision.
     *
     * @throws RuntimeException (via CFLib factory) on stale cache, revision
     *         collision, unique-index violation, or unresolved Cluster relation.
     */
    public void updateHostNode(CFAccAuthorization Authorization, CFAccHostNodeBuff Buff) {
        CFAccHostNodePKey pkey = schema.getFactoryHostNode().newPKey();
        pkey.setRequiredClusterId(Buff.getRequiredClusterId());
        pkey.setRequiredHostNodeId(Buff.getRequiredHostNodeId());
        CFAccHostNodeBuff existing = dictByPKey.get(pkey);
        if (existing == null) {
            throw CFLib.getDefaultExceptionFactory().newStaleCacheDetectedException(getClass(),
                    "updateHostNode", "Existing record not found", "HostNode", pkey);
        }
        if (existing.getRequiredRevision() != Buff.getRequiredRevision()) {
            throw CFLib.getDefaultExceptionFactory().newCollisionDetectedException(getClass(),
                    "updateHostNode", pkey);
        }
        Buff.setRequiredRevision(Buff.getRequiredRevision() + 1);

        CFAccHostNodeByClusterIdxKey existingKeyClusterIdx =
                schema.getFactoryHostNode().newClusterIdxKey();
        existingKeyClusterIdx.setRequiredClusterId(existing.getRequiredClusterId());
        CFAccHostNodeByClusterIdxKey newKeyClusterIdx =
                schema.getFactoryHostNode().newClusterIdxKey();
        newKeyClusterIdx.setRequiredClusterId(Buff.getRequiredClusterId());

        CFAccHostNodeByUDescrIdxKey existingKeyUDescrIdx =
                schema.getFactoryHostNode().newUDescrIdxKey();
        existingKeyUDescrIdx.setRequiredClusterId(existing.getRequiredClusterId());
        existingKeyUDescrIdx.setRequiredDescription(existing.getRequiredDescription());
        CFAccHostNodeByUDescrIdxKey newKeyUDescrIdx =
                schema.getFactoryHostNode().newUDescrIdxKey();
        newKeyUDescrIdx.setRequiredClusterId(Buff.getRequiredClusterId());
        newKeyUDescrIdx.setRequiredDescription(Buff.getRequiredDescription());

        CFAccHostNodeByHostNameIdxKey existingKeyHostNameIdx =
                schema.getFactoryHostNode().newHostNameIdxKey();
        existingKeyHostNameIdx.setRequiredClusterId(existing.getRequiredClusterId());
        existingKeyHostNameIdx.setRequiredHostName(existing.getRequiredHostName());
        CFAccHostNodeByHostNameIdxKey newKeyHostNameIdx =
                schema.getFactoryHostNode().newHostNameIdxKey();
        newKeyHostNameIdx.setRequiredClusterId(Buff.getRequiredClusterId());
        newKeyHostNameIdx.setRequiredHostName(Buff.getRequiredHostName());

        // Check unique indexes — only when the key actually changed.
        if (!existingKeyUDescrIdx.equals(newKeyUDescrIdx)) {
            if (dictByUDescrIdx.containsKey(newKeyUDescrIdx)) {
                throw CFLib.getDefaultExceptionFactory().newUniqueIndexViolationException(getClass(),
                        "updateHostNode", "HostNodeUDescrIdx", newKeyUDescrIdx);
            }
        }
        if (!existingKeyHostNameIdx.equals(newKeyHostNameIdx)) {
            if (dictByHostNameIdx.containsKey(newKeyHostNameIdx)) {
                throw CFLib.getDefaultExceptionFactory().newUniqueIndexViolationException(getClass(),
                        "updateHostNode", "HostNodeUHostNameIdx", newKeyHostNameIdx);
            }
        }

        // Validate foreign keys.  (Original generated code guarded this with a
        // constant-true "allNull" flag; the check always ran.)
        if (null == schema.getTableCluster().readDerivedByIdIdx(Authorization,
                Buff.getRequiredClusterId())) {
            throw CFLib.getDefaultExceptionFactory().newUnresolvedRelationException(getClass(),
                    "updateHostNode", "Container", "HostNodeCluster", "Cluster", null);
        }

        // Update is valid — replace the primary entry and migrate index entries.
        SortedMap<CFAccHostNodePKey, CFAccHostNodeBuff> subdict;
        dictByPKey.put(pkey, Buff);

        subdict = dictByClusterIdx.get(existingKeyClusterIdx);
        if (subdict != null) {
            subdict.remove(pkey);
        }
        if (dictByClusterIdx.containsKey(newKeyClusterIdx)) {
            subdict = dictByClusterIdx.get(newKeyClusterIdx);
        } else {
            subdict = new TreeMap<CFAccHostNodePKey, CFAccHostNodeBuff>();
            dictByClusterIdx.put(newKeyClusterIdx, subdict);
        }
        subdict.put(pkey, Buff);

        dictByUDescrIdx.remove(existingKeyUDescrIdx);
        dictByUDescrIdx.put(newKeyUDescrIdx, Buff);

        dictByHostNameIdx.remove(existingKeyHostNameIdx);
        dictByHostNameIdx.put(newKeyHostNameIdx, Buff);
    }

    /**
     * Delete a HostNode record with revision checking, cascading to dependent
     * Service records first, then removing the buffer from all indexes.
     * Silently returns when the record does not exist.
     *
     * @throws RuntimeException (via CFLib factory) on revision collision.
     */
    public void deleteHostNode(CFAccAuthorization Authorization,
            CFAccHostNodeBuff Buff) {
        final String S_ProcName = "CFAccRamHostNodeTable.deleteHostNode() ";
        CFAccHostNodePKey pkey = schema.getFactoryHostNode().newPKey();
        pkey.setRequiredClusterId(Buff.getRequiredClusterId());
        pkey.setRequiredHostNodeId(Buff.getRequiredHostNodeId());
        CFAccHostNodeBuff existing = dictByPKey.get(pkey);
        if (existing == null) {
            return;
        }
        if (existing.getRequiredRevision() != Buff.getRequiredRevision()) {
            throw CFLib.getDefaultExceptionFactory().newCollisionDetectedException(getClass(),
                    "deleteHostNode", pkey);
        }

        CFAccHostNodeByClusterIdxKey keyClusterIdx = schema.getFactoryHostNode().newClusterIdxKey();
        keyClusterIdx.setRequiredClusterId(existing.getRequiredClusterId());

        CFAccHostNodeByUDescrIdxKey keyUDescrIdx = schema.getFactoryHostNode().newUDescrIdxKey();
        keyUDescrIdx.setRequiredClusterId(existing.getRequiredClusterId());
        keyUDescrIdx.setRequiredDescription(existing.getRequiredDescription());

        CFAccHostNodeByHostNameIdxKey keyHostNameIdx = schema.getFactoryHostNode().newHostNameIdxKey();
        keyHostNameIdx.setRequiredClusterId(existing.getRequiredClusterId());
        keyHostNameIdx.setRequiredHostName(existing.getRequiredHostName());

        // Validate reverse foreign keys

        // Delete is valid — cascade to dependent services first.
        schema.getTableService().deleteServiceByHostIdx(Authorization,
                Buff.getRequiredClusterId(), Buff.getRequiredHostNodeId());

        SortedMap<CFAccHostNodePKey, CFAccHostNodeBuff> subdict;
        dictByPKey.remove(pkey);
        subdict = dictByClusterIdx.get(keyClusterIdx);
        // Fix: original dereferenced subdict unconditionally — guard against an
        // inconsistent index state producing a NullPointerException.
        if (subdict != null) {
            subdict.remove(pkey);
        }
        dictByUDescrIdx.remove(keyUDescrIdx);
        dictByHostNameIdx.remove(keyHostNameIdx);
    }

    /**
     * Delete by id columns — builds the PKey and delegates.
     */
    public void deleteHostNodeByIdIdx(CFAccAuthorization Authorization,
            long argClusterId, long argHostNodeId) {
        CFAccHostNodePKey key = schema.getFactoryHostNode().newPKey();
        key.setRequiredClusterId(argClusterId);
        key.setRequiredHostNodeId(argHostNodeId);
        deleteHostNodeByIdIdx(Authorization, key);
    }

    /**
     * Delete every record matching the given primary key.
     * NOTE(review): relies on the generated key's equals() accepting a buffer
     * argument (argKey.equals(cur)) — confirm against the key implementation.
     */
    public void deleteHostNodeByIdIdx(CFAccAuthorization Authorization,
            CFAccHostNodePKey argKey) {
        CFAccHostNodeBuff cur;
        // Collect matches first to avoid mutating dictByPKey while iterating it.
        LinkedList<CFAccHostNodeBuff> matchSet = new LinkedList<CFAccHostNodeBuff>();
        Iterator<CFAccHostNodeBuff> values = dictByPKey.values().iterator();
        while (values.hasNext()) {
            cur = values.next();
            if (argKey.equals(cur)) {
                matchSet.add(cur);
            }
        }
        Iterator<CFAccHostNodeBuff> iterMatch = matchSet.iterator();
        while (iterMatch.hasNext()) {
            cur = iterMatch.next();
            deleteHostNode(Authorization, cur);
        }
    }

    /**
     * Delete all records belonging to a cluster — builds the index key and delegates.
     */
    public void deleteHostNodeByClusterIdx(CFAccAuthorization Authorization,
            long argClusterId) {
        CFAccHostNodeByClusterIdxKey key = schema.getFactoryHostNode().newClusterIdxKey();
        key.setRequiredClusterId(argClusterId);
        deleteHostNodeByClusterIdx(Authorization, key);
    }

    /**
     * Delete every record matching the cluster index key.
     */
    public void deleteHostNodeByClusterIdx(CFAccAuthorization Authorization,
            CFAccHostNodeByClusterIdxKey argKey) {
        CFAccHostNodeBuff cur;
        // Collect matches first to avoid mutating dictByPKey while iterating it.
        LinkedList<CFAccHostNodeBuff> matchSet = new LinkedList<CFAccHostNodeBuff>();
        Iterator<CFAccHostNodeBuff> values = dictByPKey.values().iterator();
        while (values.hasNext()) {
            cur = values.next();
            if (argKey.equals(cur)) {
                matchSet.add(cur);
            }
        }
        Iterator<CFAccHostNodeBuff> iterMatch = matchSet.iterator();
        while (iterMatch.hasNext()) {
            cur = iterMatch.next();
            deleteHostNode(Authorization, cur);
        }
    }

    /**
     * Delete by (ClusterId, Description) — builds the index key and delegates.
     */
    public void deleteHostNodeByUDescrIdx(CFAccAuthorization Authorization,
            long argClusterId, String argDescription) {
        CFAccHostNodeByUDescrIdxKey key = schema.getFactoryHostNode().newUDescrIdxKey();
        key.setRequiredClusterId(argClusterId);
        key.setRequiredDescription(argDescription);
        deleteHostNodeByUDescrIdx(Authorization, key);
    }

    /**
     * Delete every record matching the unique description index key.
     */
    public void deleteHostNodeByUDescrIdx(CFAccAuthorization Authorization,
            CFAccHostNodeByUDescrIdxKey argKey) {
        CFAccHostNodeBuff cur;
        // Collect matches first to avoid mutating dictByPKey while iterating it.
        LinkedList<CFAccHostNodeBuff> matchSet = new LinkedList<CFAccHostNodeBuff>();
        Iterator<CFAccHostNodeBuff> values = dictByPKey.values().iterator();
        while (values.hasNext()) {
            cur = values.next();
            if (argKey.equals(cur)) {
                matchSet.add(cur);
            }
        }
        Iterator<CFAccHostNodeBuff> iterMatch = matchSet.iterator();
        while (iterMatch.hasNext()) {
            cur = iterMatch.next();
            deleteHostNode(Authorization, cur);
        }
    }

    /**
     * Delete by (ClusterId, HostName) — builds the index key and delegates.
     */
    public void deleteHostNodeByHostNameIdx(CFAccAuthorization Authorization,
            long argClusterId, String argHostName) {
        CFAccHostNodeByHostNameIdxKey key = schema.getFactoryHostNode().newHostNameIdxKey();
        key.setRequiredClusterId(argClusterId);
        key.setRequiredHostName(argHostName);
        deleteHostNodeByHostNameIdx(Authorization, key);
    }

    /**
     * Delete every record matching the unique host-name index key.
     */
    public void deleteHostNodeByHostNameIdx(CFAccAuthorization Authorization,
            CFAccHostNodeByHostNameIdxKey argKey) {
        CFAccHostNodeBuff cur;
        // Collect matches first to avoid mutating dictByPKey while iterating it.
        LinkedList<CFAccHostNodeBuff> matchSet = new LinkedList<CFAccHostNodeBuff>();
        Iterator<CFAccHostNodeBuff> values = dictByPKey.values().iterator();
        while (values.hasNext()) {
            cur = values.next();
            if (argKey.equals(cur)) {
                matchSet.add(cur);
            }
        }
        Iterator<CFAccHostNodeBuff> iterMatch = matchSet.iterator();
        while (iterMatch.hasNext()) {
            cur = iterMatch.next();
            deleteHostNode(Authorization, cur);
        }
    }

    /**
     * Open a cursor over all records.
     */
    public CFAccCursor openHostNodeCursorAll(CFAccAuthorization Authorization) {
        CFAccCursor cursor = new CFAccRamHostNodeCursor(Authorization, schema,
                dictByPKey.values());
        return (cursor);
    }

    /**
     * Open a cursor over one cluster's records (empty cursor when none exist).
     */
    public CFAccCursor openHostNodeCursorByClusterIdx(CFAccAuthorization Authorization,
            long ClusterId) {
        CFAccCursor cursor;
        CFAccHostNodeByClusterIdxKey key = schema.getFactoryHostNode().newClusterIdxKey();
        key.setRequiredClusterId(ClusterId);
        if (dictByClusterIdx.containsKey(key)) {
            SortedMap<CFAccHostNodePKey, CFAccHostNodeBuff> subdictClusterIdx =
                    dictByClusterIdx.get(key);
            cursor = new CFAccRamHostNodeCursor(Authorization, schema,
                    subdictClusterIdx.values());
        } else {
            cursor = new CFAccRamHostNodeCursor(Authorization, schema,
                    new ArrayList<CFAccHostNodeBuff>());
        }
        return (cursor);
    }

    /**
     * Close a cursor.  No-op for the RAM implementation (no backing reader).
     */
    public void closeHostNodeCursor(CFAccCursor Cursor) {
        // Cursor.DataReader.Close();
    }

    /**
     * Advance the cursor and return the next record.
     */
    public CFAccHostNodeBuff nextHostNodeCursor(CFAccCursor Cursor) {
        CFAccRamHostNodeCursor cursor = (CFAccRamHostNodeCursor) Cursor;
        CFAccHostNodeBuff rec = cursor.getCursor().next();
        cursor.setRowIdx(cursor.getRowIdx() + 1);
        return (rec);
    }

    /**
     * Step the cursor back one row by resetting and re-walking forward.
     */
    public CFAccHostNodeBuff prevHostNodeCursor(CFAccCursor Cursor) {
        int targetRowIdx = (Cursor.getRowIdx() > 1) ? Cursor.getRowIdx() - 1 : 1;
        CFAccHostNodeBuff rec = null;
        if (Cursor.getRowIdx() >= targetRowIdx) {
            Cursor.reset();
        }
        while (Cursor.getRowIdx() < targetRowIdx) {
            rec = nextHostNodeCursor(Cursor);
        }
        return (rec);
    }

    /**
     * Reset the cursor and return its first record.
     */
    public CFAccHostNodeBuff firstHostNodeCursor(CFAccCursor Cursor) {
        int targetRowIdx = 1;
        CFAccHostNodeBuff rec = null;
        Cursor.reset();
        while (Cursor.getRowIdx() < targetRowIdx) {
            rec = nextHostNodeCursor(Cursor);
        }
        return (rec);
    }

    /**
     * Not supported by the RAM implementation.
     *
     * @throws RuntimeException always (not implemented yet).
     */
    public CFAccHostNodeBuff lastHostNodeCursor(CFAccCursor Cursor) {
        throw CFLib.getDefaultExceptionFactory().newNotImplementedYetException(getClass(),
                "lastHostNodeCursor");
    }

    /**
     * Position the cursor on the Idx'th row (1-based) and return that record.
     */
    public CFAccHostNodeBuff nthHostNodeCursor(CFAccCursor Cursor, int Idx) {
        int targetRowIdx = Idx;
        CFAccHostNodeBuff rec = null;
        if (Cursor.getRowIdx() >= targetRowIdx) {
            Cursor.reset();
        }
        while (Cursor.getRowIdx() < targetRowIdx) {
            rec = nextHostNodeCursor(Cursor);
        }
        return (rec);
    }

    /**
     * Release prepared statements.  No-op: the RAM implementation uses no JDBC.
     */
    public void releasePreparedStatements() {
    }
}