List of usage examples for the java.util.IdentityHashMap constructor
public IdentityHashMap()
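IdentityHashMap intentionally violates the general Map contract: keys are compared with reference equality (==) instead of equals(), and hashed with System.identityHashCode instead of hashCode(). A minimal sketch (not taken from any of the source files below) contrasting it with HashMap:

    import java.util.HashMap;
    import java.util.IdentityHashMap;
    import java.util.Map;

    public class IdentityDemo {
        public static void main(String[] args) {
            // Two distinct String instances that are equals() but not ==
            String a = new String("key");
            String b = new String("key");

            Map<String, Integer> hashMap = new HashMap<>();
            hashMap.put(a, 1);
            hashMap.put(b, 2);                  // replaces the first entry
            System.out.println(hashMap.size()); // 1

            Map<String, Integer> identityMap = new IdentityHashMap<>();
            identityMap.put(a, 1);
            identityMap.put(b, 2);                  // distinct entry, because a != b
            System.out.println(identityMap.size()); // 2
        }
    }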
From source file:de.micromata.genome.util.bean.PrivateBeanUtils.java
/**
 * Gets the bean size.
 *
 * @param bean the bean
 * @param classNameMatcher the class name matcher
 * @param fieldNameMatcher matches against (decl class name).fieldname
 * @return the bean size
 */
public static int getBeanSize(Object bean, Matcher<String> classNameMatcher, Matcher<String> fieldNameMatcher) {
    IdentityHashMap<Object, Object> m = new IdentityHashMap<Object, Object>();
    return getBeanSize(bean, m, classNameMatcher, fieldNameMatcher);
}
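Here the IdentityHashMap presumably serves as a visited set for the recursive getBeanSize overload it delegates to: keying on reference identity counts each object instance exactly once, even when two fields hold distinct but equals() objects, and it keeps the traversal from recursing forever on cyclic object graphs.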
From source file:com.google.gwt.emultest.java.util.IdentityHashMapTest.java
/**
 * Test that the implementation differs from a standard map in demanding identity.
 */
public void testIdentityBasedHashCode() {
    IdentityHashMap hashMap1 = new IdentityHashMap();
    checkEmptyHashMapAssumptions(hashMap1);

    IdentityHashMap hashMap2 = new IdentityHashMap();
    checkEmptyHashMapAssumptions(hashMap2);

    hashMap1.put(new Foo(), VALUE_1);
    hashMap2.put(new Foo(), VALUE_1);
    if (!TestUtils.isJvm()) {
        // Only reliable in Production Mode since Development Mode can have
        // identity hash collisions.
        assertFalse(hashMap1.hashCode() == hashMap2.hashCode());
    }
}
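The test relies on the map's own hashCode() being derived from the identity hash codes of its entries: the two Foo instances are distinct objects, so the two single-entry maps should hash differently. As the guard in the code notes, this only holds where identity hashes rarely collide, which is why the assertion is skipped outside Production Mode.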
From source file:com.github.nmorel.gwtjackson.client.JsonSerializationContext.java
public void addObjectId(Object object, ObjectIdSerializer<?> id) {
    if (null == mapObjectId) {
        if (useEqualityForObjectId) {
            mapObjectId = new HashMap<Object, ObjectIdSerializer<?>>();
        } else {
            mapObjectId = new IdentityHashMap<Object, ObjectIdSerializer<?>>();
        }
    }
    mapObjectId.put(object, id);
}
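The design choice here is lazy and mode-dependent: the object-id map is only allocated when the first id is registered, and the useEqualityForObjectId flag selects between equals()-based tracking (HashMap) and reference-based tracking (IdentityHashMap), apparently mirroring Jackson's distinction between comparing object ids by value and by instance.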
From source file:org.optaplanner.benchmark.impl.result.PlannerBenchmarkResult.java
protected static PlannerBenchmarkResult createMergeSingleton(
        List<SingleBenchmarkResult> singleBenchmarkResultList) {
    PlannerBenchmarkResult newResult = null;
    Map<PlannerBenchmarkResult, PlannerBenchmarkResult> mergeMap
            = new IdentityHashMap<PlannerBenchmarkResult, PlannerBenchmarkResult>();
    for (SingleBenchmarkResult singleBenchmarkResult : singleBenchmarkResultList) {
        PlannerBenchmarkResult oldResult = singleBenchmarkResult.getSolverBenchmarkResult()
                .getPlannerBenchmarkResult();
        if (!mergeMap.containsKey(oldResult)) {
            if (newResult == null) {
                newResult = new PlannerBenchmarkResult();
                newResult.setAggregation(true);
                newResult.availableProcessors = oldResult.availableProcessors;
                newResult.loggingLevel = oldResult.loggingLevel;
                newResult.maxMemory = oldResult.maxMemory;
                newResult.optaPlannerVersion = oldResult.optaPlannerVersion;
                newResult.javaVersion = oldResult.javaVersion;
                newResult.javaVM = oldResult.javaVM;
                newResult.operatingSystem = oldResult.operatingSystem;
                newResult.parallelBenchmarkCount = oldResult.parallelBenchmarkCount;
                newResult.warmUpTimeMillisSpentLimit = oldResult.warmUpTimeMillisSpentLimit;
                newResult.environmentMode = oldResult.environmentMode;
                newResult.solverBenchmarkResultList = new ArrayList<SolverBenchmarkResult>();
                newResult.unifiedProblemBenchmarkResultList = new ArrayList<ProblemBenchmarkResult>();
                newResult.startingTimestamp = null;
                newResult.benchmarkTimeMillisSpent = null;
            } else {
                newResult.availableProcessors = ConfigUtils.mergeProperty(newResult.availableProcessors,
                        oldResult.availableProcessors);
                newResult.loggingLevel = ConfigUtils.mergeProperty(newResult.loggingLevel,
                        oldResult.loggingLevel);
                newResult.maxMemory = ConfigUtils.mergeProperty(newResult.maxMemory, oldResult.maxMemory);
                newResult.optaPlannerVersion = ConfigUtils.mergeProperty(newResult.optaPlannerVersion,
                        oldResult.optaPlannerVersion);
                newResult.javaVersion = ConfigUtils.mergeProperty(newResult.javaVersion,
                        oldResult.javaVersion);
                newResult.javaVM = ConfigUtils.mergeProperty(newResult.javaVM, oldResult.javaVM);
                newResult.operatingSystem = ConfigUtils.mergeProperty(newResult.operatingSystem,
                        oldResult.operatingSystem);
                newResult.parallelBenchmarkCount = ConfigUtils.mergeProperty(newResult.parallelBenchmarkCount,
                        oldResult.parallelBenchmarkCount);
                newResult.warmUpTimeMillisSpentLimit = ConfigUtils.mergeProperty(
                        newResult.warmUpTimeMillisSpentLimit, oldResult.warmUpTimeMillisSpentLimit);
                newResult.environmentMode = ConfigUtils.mergeProperty(newResult.environmentMode,
                        oldResult.environmentMode);
            }
            mergeMap.put(oldResult, newResult);
        }
    }
    return newResult;
}
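mergeMap is deliberately identity-keyed: each distinct parent PlannerBenchmarkResult instance should contribute to the aggregate exactly once, and comparing by reference guarantees the containsKey check cannot be confused by whatever equals() semantics the result objects might have.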
From source file:com.google.gwt.emultest.java.util.IdentityHashMapTest.java
public void testIsEmpty() {
    IdentityHashMap srcMap = new IdentityHashMap();
    checkEmptyHashMapAssumptions(srcMap);

    IdentityHashMap dstMap = new IdentityHashMap();
    checkEmptyHashMapAssumptions(dstMap);

    dstMap.putAll(srcMap);
    assertTrue(dstMap.isEmpty());

    dstMap.put(KEY_KEY, VALUE_VAL);
    assertFalse(dstMap.isEmpty());

    dstMap.remove(KEY_KEY);
    assertTrue(dstMap.isEmpty());
    assertEquals(dstMap.size(), 0);
}
From source file:com.exadel.flamingo.flex.messaging.amf.io.AMF0Serializer.java
private void clearStoredObjects() {
    storedObjects = new IdentityHashMap<Object, Integer>();
    storedObjectCount = 0;
}
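A storedObjects table like this is the standard back-reference mechanism in serializers such as AMF0: the first time an exact instance is seen it is assigned an index, and later sightings of the same instance emit a reference to that index instead of re-serializing the object. A minimal sketch of the idea, using a hypothetical ReferenceTable class that is not part of AMF0Serializer's actual API:

    import java.util.IdentityHashMap;
    import java.util.Map;

    class ReferenceTable {
        // Identity-keyed: equals()-equivalent but distinct objects get separate indices
        private Map<Object, Integer> storedObjects = new IdentityHashMap<>();
        private int storedObjectCount = 0;

        /** Returns the table index if this exact instance was seen before, or -1 after registering it. */
        int indexOf(Object o) {
            Integer index = storedObjects.get(o); // identity lookup: same instance, not equals()
            if (index != null) {
                return index;                     // caller emits a back-reference to this index
            }
            storedObjects.put(o, storedObjectCount++);
            return -1;                            // first occurrence: caller serializes in full
        }
    }

Identity semantics matter here because two equal-but-distinct objects must each be written out once, while the same instance reached twice (including via a cycle) must become a reference.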
From source file:com.bianfeng.bfas.hive.io.RealtimeInputFormat2.java
/**
 * This function identifies and returns the hosts that contribute
 * most for a given split. For calculating the contribution, rack
 * locality is treated on par with host locality, so hosts from racks
 * that contribute the most are preferred over hosts on racks that
 * contribute less.
 * @param blkLocations The list of block locations
 * @param offset
 * @param splitSize
 * @return array of hosts that contribute most to this split
 * @throws IOException
 */
protected String[] getSplitHosts(BlockLocation[] blkLocations, long offset, long splitSize,
        NetworkTopology clusterMap) throws IOException {

    int startIndex = getBlockIndex(blkLocations, offset);

    long bytesInThisBlock = blkLocations[startIndex].getOffset()
            + blkLocations[startIndex].getLength() - offset;

    // If this is the only block, just return
    if (bytesInThisBlock >= splitSize) {
        return blkLocations[startIndex].getHosts();
    }

    long bytesInFirstBlock = bytesInThisBlock;
    int index = startIndex + 1;
    splitSize -= bytesInThisBlock;

    while (splitSize > 0) {
        bytesInThisBlock = Math.min(splitSize, blkLocations[index++].getLength());
        splitSize -= bytesInThisBlock;
    }

    long bytesInLastBlock = bytesInThisBlock;
    int endIndex = index - 1;

    Map<Node, NodeInfo> hostsMap = new IdentityHashMap<Node, NodeInfo>();
    Map<Node, NodeInfo> racksMap = new IdentityHashMap<Node, NodeInfo>();
    String[] allTopos = new String[0];

    // Build the hierarchy and aggregate the contribution of
    // bytes at each level. See TestGetSplitHosts.java
    for (index = startIndex; index <= endIndex; index++) {

        // Establish the bytes in this block
        if (index == startIndex) {
            bytesInThisBlock = bytesInFirstBlock;
        } else if (index == endIndex) {
            bytesInThisBlock = bytesInLastBlock;
        } else {
            bytesInThisBlock = blkLocations[index].getLength();
        }

        allTopos = blkLocations[index].getTopologyPaths();

        // If no topology information is available, just
        // prefix a fakeRack
        if (allTopos.length == 0) {
            allTopos = fakeRacks(blkLocations, index);
        }

        // NOTE: This code currently works only for one level of
        // hierarchy (rack/host). However, it is relatively easy
        // to extend this to support aggregation at different
        // levels
        for (String topo : allTopos) {
            Node node, parentNode;
            NodeInfo nodeInfo, parentNodeInfo;

            node = clusterMap.getNode(topo);
            if (node == null) {
                node = new NodeBase(topo);
                clusterMap.add(node);
            }

            nodeInfo = hostsMap.get(node);
            if (nodeInfo == null) {
                nodeInfo = new NodeInfo(node);
                hostsMap.put(node, nodeInfo);
                parentNode = node.getParent();
                parentNodeInfo = racksMap.get(parentNode);
                if (parentNodeInfo == null) {
                    parentNodeInfo = new NodeInfo(parentNode);
                    racksMap.put(parentNode, parentNodeInfo);
                }
                parentNodeInfo.addLeaf(nodeInfo);
            } else {
                nodeInfo = hostsMap.get(node);
                parentNode = node.getParent();
                parentNodeInfo = racksMap.get(parentNode);
            }

            nodeInfo.addValue(index, bytesInThisBlock);
            parentNodeInfo.addValue(index, bytesInThisBlock);
        } // for all topos
    } // for all indices

    return identifyHosts(allTopos.length, racksMap);
}
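Both hostsMap and racksMap are identity-keyed on Node. That is presumably safe because clusterMap.getNode(topo) returns one canonical Node instance per topology path (and freshly created NodeBase instances are registered with the cluster map before use), so reference comparison is sufficient and avoids depending on Node's equals() semantics.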
From source file:edu.ucsb.cs.hadoop.CustomFileInputFormat.java
/**
 * This function identifies and returns the hosts that contribute most for a
 * given split. For calculating the contribution, rack locality is treated
 * on par with host locality, so hosts from racks that contribute the most
 * are preferred over hosts on racks that contribute less.
 * @param blkLocations The list of block locations
 * @param offset
 * @param splitSize
 * @return array of hosts that contribute most to this split
 * @throws IOException
 */
protected String[] getSplitHosts(BlockLocation[] blkLocations, long offset, long splitSize,
        NetworkTopology clusterMap) throws IOException {

    int startIndex = getBlockIndex(blkLocations, offset);

    long bytesInThisBlock = blkLocations[startIndex].getOffset()
            + blkLocations[startIndex].getLength() - offset;

    // If this is the only block, just return
    if (bytesInThisBlock >= splitSize) {
        return blkLocations[startIndex].getHosts();
    }

    long bytesInFirstBlock = bytesInThisBlock;
    int index = startIndex + 1;
    splitSize -= bytesInThisBlock;

    while (splitSize > 0) {
        bytesInThisBlock = Math.min(splitSize, blkLocations[index++].getLength());
        splitSize -= bytesInThisBlock;
    }

    long bytesInLastBlock = bytesInThisBlock;
    int endIndex = index - 1;

    Map<Node, NodeInfo> hostsMap = new IdentityHashMap<Node, NodeInfo>();
    Map<Node, NodeInfo> racksMap = new IdentityHashMap<Node, NodeInfo>();
    String[] allTopos = new String[0];

    // Build the hierarchy and aggregate the contribution of
    // bytes at each level. See TestGetSplitHosts.java
    for (index = startIndex; index <= endIndex; index++) {

        // Establish the bytes in this block
        if (index == startIndex) {
            bytesInThisBlock = bytesInFirstBlock;
        } else if (index == endIndex) {
            bytesInThisBlock = bytesInLastBlock;
        } else {
            bytesInThisBlock = blkLocations[index].getLength();
        }

        allTopos = blkLocations[index].getTopologyPaths();

        // If no topology information is available, just
        // prefix a fakeRack
        if (allTopos.length == 0) {
            allTopos = fakeRacks(blkLocations, index);
        }

        // NOTE: This code currently works only for one level of
        // hierarchy (rack/host). However, it is relatively easy
        // to extend this to support aggregation at different
        // levels
        for (String topo : allTopos) {
            Node node, parentNode;
            NodeInfo nodeInfo, parentNodeInfo;

            node = clusterMap.getNode(topo);
            if (node == null) {
                node = new NodeBase(topo);
                clusterMap.add(node);
            }

            nodeInfo = hostsMap.get(node);
            if (nodeInfo == null) {
                nodeInfo = new NodeInfo(node);
                hostsMap.put(node, nodeInfo);
                parentNode = node.getParent();
                parentNodeInfo = racksMap.get(parentNode);
                if (parentNodeInfo == null) {
                    parentNodeInfo = new NodeInfo(parentNode);
                    racksMap.put(parentNode, parentNodeInfo);
                }
                parentNodeInfo.addLeaf(nodeInfo);
            } else {
                nodeInfo = hostsMap.get(node);
                parentNode = node.getParent();
                parentNodeInfo = racksMap.get(parentNode);
            }

            nodeInfo.addValue(index, bytesInThisBlock);
            parentNodeInfo.addValue(index, bytesInThisBlock);
        } // for all topos
    } // for all indices

    return identifyHosts(allTopos.length, racksMap);
}
From source file:com.google.gwt.emultest.java.util.IdentityHashMapTest.java
public void testKeysConflict() {
    IdentityHashMap hashMap = new IdentityHashMap();

    hashMap.put(STRING_ZERO_KEY, STRING_ZERO_VALUE);
    hashMap.put(INTEGER_ZERO_KEY, INTEGER_ZERO_VALUE);
    hashMap.put(ODD_ZERO_KEY, ODD_ZERO_VALUE);
    assertEquals(hashMap.get(INTEGER_ZERO_KEY), INTEGER_ZERO_VALUE);
    assertEquals(hashMap.get(ODD_ZERO_KEY), ODD_ZERO_VALUE);
    assertEquals(hashMap.get(STRING_ZERO_KEY), STRING_ZERO_VALUE);

    hashMap.remove(INTEGER_ZERO_KEY);
    assertEquals(hashMap.get(ODD_ZERO_KEY), ODD_ZERO_VALUE);
    assertEquals(hashMap.get(STRING_ZERO_KEY), STRING_ZERO_VALUE);
    assertEquals(hashMap.get(INTEGER_ZERO_KEY), null);

    hashMap.remove(ODD_ZERO_KEY);
    assertEquals(hashMap.get(INTEGER_ZERO_KEY), null);
    assertEquals(hashMap.get(ODD_ZERO_KEY), null);
    assertEquals(hashMap.get(STRING_ZERO_KEY), STRING_ZERO_VALUE);

    hashMap.remove(STRING_ZERO_KEY);
    assertEquals(hashMap.get(INTEGER_ZERO_KEY), null);
    assertEquals(hashMap.get(ODD_ZERO_KEY), null);
    assertEquals(hashMap.get(STRING_ZERO_KEY), null);
    assertEquals(hashMap.size(), 0);
}
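The ZERO-named keys are presumably three distinct instances chosen so their hash codes collide (the naming suggests each hashes to zero). The test then verifies that an identity-based map still tells them apart through puts, lookups, and staged removals, since key identity rather than hashCode()/equals() decides which entry a lookup hits.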
From source file:org.nabucco.alfresco.enhScriptEnv.common.script.functions.AbstractLogFunction.java
protected Map<ReferenceScript, LoggerData> getScriptLoggerDataForContext(final Object scope,
        final boolean createIfNull) {
    Map<ReferenceScript, LoggerData> dataByScript = null;

    this.scopeLoggerDataLock.readLock().lock();
    try {
        dataByScript = this.scopeLoggerData.get(scope);
    } finally {
        this.scopeLoggerDataLock.readLock().unlock();
    }

    if (dataByScript == null && createIfNull) {
        dataByScript = new IdentityHashMap<ReferenceScript, LoggerData>();
        this.scopeLoggerDataLock.writeLock().lock();
        try {
            this.scopeLoggerData.put(scope, dataByScript);
        } finally {
            this.scopeLoggerDataLock.writeLock().unlock();
        }
    }

    return dataByScript;
}
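Two details are worth noting in this example. First, the per-scope map keys on ReferenceScript identity, and the outer scopeLoggerData lookup by scope object is guarded by a ReadWriteLock for a read-mostly workload. Second, there is a check-then-act window between releasing the read lock and acquiring the write lock: two threads can race to create the per-scope map, and the later put replaces the earlier one, which is harmless only as long as losing a freshly created, still-empty map is acceptable.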