Example usage for java.util.IdentityHashMap IdentityHashMap()

Introduction

On this page you can find example usage for the java.util.IdentityHashMap no-argument constructor, IdentityHashMap().

Prototype

public IdentityHashMap() 

Document

Constructs a new, empty identity hash map with a default expected maximum size (21).
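
Before the project examples below, here is a minimal sketch of what reference-equality semantics mean in practice; the class name and values are illustrative only, not taken from any of the quoted projects:

import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;

public class IdentityHashMapDemo {
    public static void main(String[] args) {
        // Two distinct String instances with equal contents.
        String a = new String("key");
        String b = new String("key");

        Map<String, Integer> hashMap = new HashMap<>();
        Map<String, Integer> identityMap = new IdentityHashMap<>();
        hashMap.put(a, 1);
        identityMap.put(a, 1);

        System.out.println(hashMap.get(b));     // 1    -- HashMap compares keys with equals()
        System.out.println(identityMap.get(b)); // null -- IdentityHashMap compares keys with ==
        System.out.println(identityMap.get(a)); // 1    -- the same reference is found
    }
}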

Usage

From source file: com.google.gwt.emultest.java.util.IdentityHashMapTest.java

public void testGet() {
    IdentityHashMap hashMap = new IdentityHashMap();
    checkEmptyHashMapAssumptions(hashMap);

    assertNull(hashMap.get(KEY_TEST_GET));
    hashMap.put(KEY_TEST_GET, VALUE_TEST_GET);
    assertNotNull(hashMap.get(KEY_TEST_GET));

    assertNull(hashMap.get(null));
    hashMap.put(null, VALUE_TEST_GET);
    assertNotNull(hashMap.get(null));

    hashMap.put(null, null);
    assertNull(hashMap.get(null));
}
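
Note that IdentityHashMap, like HashMap, permits null keys and null values; the second half of this test exercises exactly that: a null key is stored and looked up by reference, and remapping it to a null value makes get(null) return null again.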

From source file: org.talend.daikon.properties.PropertiesImpl.java

@Override
public void accept(AnyPropertyVisitor visitor, Properties parent) {
    // Uses a set with reference-equality instead of instance-equality to avoid a
    // stack overflow from hashCode() while visiting.
    Set<Properties> visited = Collections.newSetFromMap(new IdentityHashMap<Properties, Boolean>());
    acceptInternal(visitor, parent, visited);
}
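
Collections.newSetFromMap(new IdentityHashMap<...>()) is the standard JDK idiom for an identity-based Set, used here as a cycle guard so traversal terminates even on cyclic object graphs. A self-contained sketch of the same pattern follows; Node and the walk method are illustrative, not Talend's code:

import java.util.ArrayList;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Set;

public class GraphWalker {
    static class Node {
        final List<Node> children = new ArrayList<>();
    }

    // Visits each node instance exactly once, even in a cyclic graph and even if
    // the nodes' own equals()/hashCode() were expensive or recursive.
    static void walk(Node node, Set<Node> visited) {
        if (!visited.add(node)) {
            return; // this exact instance has been seen already
        }
        for (Node child : node.children) {
            walk(child, visited);
        }
    }

    public static void main(String[] args) {
        Node root = new Node();
        Node leaf = new Node();
        root.children.add(leaf);
        leaf.children.add(root); // introduce a cycle
        walk(root, Collections.newSetFromMap(new IdentityHashMap<>()));
        System.out.println("traversal terminated");
    }
}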

From source file: org.wso2.andes.server.plugins.PluginManager.java

public Map<List<String>, ConfigurationPluginFactory> getConfigurationPlugins() {
    Map<List<String>, ConfigurationPluginFactory> services = new IdentityHashMap<List<String>, ConfigurationPluginFactory>();

    if (_configTracker != null && _configTracker.getServices() != null) {
        for (Object service : _configTracker.getServices()) {
            ConfigurationPluginFactory factory = (ConfigurationPluginFactory) service;
            services.put(factory.getParentPaths(), factory);
        }
    }

    services.putAll(_configPlugins);

    return services;
}
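
With List<String> keys, the identity semantics are significant: two lists that are equals() but not the same instance occupy separate entries, so this presumably relies on each factory returning a stable list instance from getParentPaths(). A small illustration with arbitrary keys and values:

import java.util.Arrays;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;

public class ListKeyDemo {
    public static void main(String[] args) {
        Map<List<String>, String> map = new IdentityHashMap<>();
        List<String> k1 = Arrays.asList("a", "b");
        List<String> k2 = Arrays.asList("a", "b"); // equals(k1), but a different instance
        map.put(k1, "first");
        map.put(k2, "second");
        System.out.println(map.size()); // 2; a HashMap would hold 1 entry
    }
}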

From source file: org.optaplanner.core.impl.score.director.AbstractScoreDirector.java

public void assertShadowVariablesAreNotStale(Score expectedWorkingScore, Object completedAction) {
    SolutionDescriptor solutionDescriptor = getSolutionDescriptor();
    Map<Object, Map<ShadowVariableDescriptor, Object>> entityToShadowVariableValuesMap = new IdentityHashMap<Object, Map<ShadowVariableDescriptor, Object>>();
    for (Iterator<Object> it = solutionDescriptor.extractAllEntitiesIterator(workingSolution); it.hasNext();) {
        Object entity = it.next();
        EntityDescriptor entityDescriptor = solutionDescriptor.findEntityDescriptorOrFail(entity.getClass());
        Collection<ShadowVariableDescriptor> shadowVariableDescriptors = entityDescriptor
                .getShadowVariableDescriptors();
        Map<ShadowVariableDescriptor, Object> shadowVariableValuesMap = new HashMap<ShadowVariableDescriptor, Object>(
                shadowVariableDescriptors.size());
        for (ShadowVariableDescriptor shadowVariableDescriptor : shadowVariableDescriptors) {
            Object value = shadowVariableDescriptor.getValue(entity);
            shadowVariableValuesMap.put(shadowVariableDescriptor, value);
        }/*w w  w  . j av a2s.  co m*/
        entityToShadowVariableValuesMap.put(entity, shadowVariableValuesMap);
    }
    variableListenerSupport.triggerAllVariableListeners();
    for (Iterator<Object> it = solutionDescriptor.extractAllEntitiesIterator(workingSolution); it.hasNext();) {
        Object entity = it.next();
        EntityDescriptor entityDescriptor = solutionDescriptor.findEntityDescriptorOrFail(entity.getClass());
        Collection<ShadowVariableDescriptor> shadowVariableDescriptors = entityDescriptor
                .getShadowVariableDescriptors();
        Map<ShadowVariableDescriptor, Object> shadowVariableValuesMap = entityToShadowVariableValuesMap
                .get(entity);
        for (ShadowVariableDescriptor shadowVariableDescriptor : shadowVariableDescriptors) {
            Object newValue = shadowVariableDescriptor.getValue(entity);
            Object originalValue = shadowVariableValuesMap.get(shadowVariableDescriptor);
            if (!ObjectUtils.equals(originalValue, newValue)) {
                throw new IllegalStateException(VariableListener.class.getSimpleName() + " corruption:"
                        + " the entity (" + entity + ")'s shadow variable ("
                        + shadowVariableDescriptor.getSimpleEntityAndVariableName() + ")'s corrupted value ("
                        + originalValue + ") changed to uncorrupted value (" + newValue + ") after all "
                        + VariableListener.class.getSimpleName()
                        + "s were triggered without changes to the genuine variables.\n" + "Probably the "
                        + VariableListener.class.getSimpleName() + " class for that shadow variable ("
                        + shadowVariableDescriptor.getSimpleEntityAndVariableName()
                        + ") forgot to update it when one of its sources changed" + " after completedAction ("
                        + completedAction + ").");
            }
        }
    }
    Score workingScore = calculateScore();
    if (!expectedWorkingScore.equals(workingScore)) {
        throw new IllegalStateException("Impossible " + VariableListener.class.getSimpleName() + " corruption:"
                + " the expectedWorkingScore (" + expectedWorkingScore + ") is not the workingScore  ("
                + workingScore + ") after all " + VariableListener.class.getSimpleName()
                + "s were triggered without changes to the genuine variables.\n"
                + "But all the shadow variable values are still the same, so this is impossible.");
    }
}
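
The identity map keys the before-snapshot by each planning entity's exact instance, so this staleness check is unaffected by whatever equals() and hashCode() the user's entity classes happen to define.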

From source file: org.apache.cassandra.db.index.SecondaryIndexManager.java

/**
 * @return all indexes which do *not* use a backing CFS internally
 */
public Set<SecondaryIndex> getIndexesNotBackedByCfs() {
    // We use an identity map because per-row indexes use the same instance across many columns
    Set<SecondaryIndex> indexes = Collections.newSetFromMap(new IdentityHashMap<SecondaryIndex, Boolean>());
    for (SecondaryIndex index : allIndexes)
        if (index.getIndexCfs() == null)
            indexes.add(index);
    return indexes;
}
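
As in the Talend example above, Collections.newSetFromMap over an IdentityHashMap is the standard idiom for an identity-based Set; the JDK does not ship an IdentityHashSet class.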

From source file: com.google.gwt.emultest.java.util.IdentityHashMapTest.java

public void testHashCode() {
    IdentityHashMap hashMap = new IdentityHashMap();
    checkEmptyHashMapAssumptions(hashMap);

    // Check that hashCode changes
    int hashCode1 = hashMap.hashCode();
    hashMap.put(KEY_KEY, VALUE_VAL);
    int hashCode2 = hashMap.hashCode();

    assertTrue(hashCode1 != hashCode2);
}
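
Per the class Javadoc, IdentityHashMap derives entry hash codes from System.identityHashCode rather than the keys' own hashCode(), a deliberate deviation from the general Map contract; this test only relies on the map's hashCode() changing once an entry is added.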

From source file: org.optaplanner.benchmark.impl.result.ProblemBenchmarkResult.java

protected static Map<ProblemBenchmarkResult, ProblemBenchmarkResult> createMergeMap(
        PlannerBenchmarkResult newPlannerBenchmarkResult,
        List<SingleBenchmarkResult> singleBenchmarkResultList) {
    // An IdentityHashMap is used, yet distinct ProblemBenchmarkResult instances are still merged
    Map<ProblemBenchmarkResult, ProblemBenchmarkResult> mergeMap = new IdentityHashMap<ProblemBenchmarkResult, ProblemBenchmarkResult>();
    Map<File, ProblemBenchmarkResult> fileToNewResultMap = new HashMap<File, ProblemBenchmarkResult>();
    for (SingleBenchmarkResult singleBenchmarkResult : singleBenchmarkResultList) {
        ProblemBenchmarkResult oldResult = singleBenchmarkResult.getProblemBenchmarkResult();
        if (!mergeMap.containsKey(oldResult)) {
            ProblemBenchmarkResult newResult;
            if (!fileToNewResultMap.containsKey(oldResult.inputSolutionFile)) {
                newResult = new ProblemBenchmarkResult(newPlannerBenchmarkResult);
                newResult.name = oldResult.name;
                newResult.inputSolutionFile = oldResult.inputSolutionFile;
                // Skip oldResult.problemReportDirectory
                newResult.problemStatisticList = new ArrayList<ProblemStatistic>(
                        oldResult.problemStatisticList.size());
                for (ProblemStatistic oldProblemStatistic : oldResult.problemStatisticList) {
                    newResult.problemStatisticList.add(
                            oldProblemStatistic.getProblemStatisticType().buildProblemStatistic(newResult));
                }
                newResult.singleBenchmarkResultList = new ArrayList<SingleBenchmarkResult>(
                        oldResult.singleBenchmarkResultList.size());
                newResult.problemScale = oldResult.problemScale;
                fileToNewResultMap.put(oldResult.inputSolutionFile, newResult);
                newPlannerBenchmarkResult.getUnifiedProblemBenchmarkResultList().add(newResult);
            } else {
                newResult = fileToNewResultMap.get(oldResult.inputSolutionFile);
                if (!ObjectUtils.equals(oldResult.name, newResult.name)) {
                    throw new IllegalStateException("The oldResult (" + oldResult + ") and newResult ("
                            + newResult
                            + ") should have the same name, because they have the same inputSolutionFile ("
                            + oldResult.inputSolutionFile + ").");
                }
                for (Iterator<ProblemStatistic> it = newResult.problemStatisticList.iterator(); it.hasNext();) {
                    ProblemStatistic newStatistic = it.next();
                    if (!oldResult.hasProblemStatisticType(newStatistic.getProblemStatisticType())) {
                        it.remove();
                    }
                }
                newResult.problemScale = ConfigUtils.mergeProperty(oldResult.problemScale,
                        newResult.problemScale);
            }
            mergeMap.put(oldResult, newResult);
        }
    }
    return mergeMap;
}
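
Note the interplay of the two maps: the IdentityHashMap keeps every old ProblemBenchmarkResult instance as a distinct key, while the plain HashMap keyed by inputSolutionFile is what actually merges different instances into one new result, which is what the comment at the top of the method points out.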

From source file: com.google.gwt.emultest.java.util.IdentityHashMapTest.java

public void testHashMap() {
    IdentityHashMap hashMap = new IdentityHashMap();
    checkEmptyHashMapAssumptions(hashMap);
}

From source file: org.lilyproject.indexer.engine.IndexUpdater.java

private void updateDenormalizedData(RecordId recordId, RecordEvent event,
        Map<Scope, Set<FieldType>> updatedFieldsByScope, Map<Long, Set<String>> vtagsByVersion) {

    // This algorithm is designed to first collect all the reindex work and then perform it.
    // Otherwise the same document would be indexed multiple times if it became
    // invalid for several different reasons (= different indexFields).

    //
    // Collect all the relevant IndexFields, and for each the relevant vtags
    //

    // This map will contain all the IndexFields we need to treat, and for each one the vtags to be considered
    Map<IndexField, Set<String>> indexFieldsVTags = new IdentityHashMap<IndexField, Set<String>>() {
        @Override
        public Set<String> get(Object key) {
            if (!this.containsKey(key) && key instanceof IndexField) {
                this.put((IndexField) key, new HashSet<String>());
            }
            return super.get(key);
        }
    };
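    // NOTE: on Java 8+ this auto-vivifying get() override is usually replaced by
    // indexFieldsVTags.computeIfAbsent(indexField, k -> new HashSet<>()) at each use site.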

    // There are two cases when denormalized data needs updating:
    //   1. when the content of a (vtagged) record changes
    //   2. when vtags change (are added, removed or point to a different version)
    // We now handle these two cases.

    // === Case 1 === updates in response to changes to this record

    long version = event.getVersionCreated() == -1 ? event.getVersionUpdated() : event.getVersionCreated();

    // Determine the relevant index fields
    List<IndexField> indexFields;
    if (event.getType() == RecordEvent.Type.DELETE) {
        indexFields = indexer.getConf().getDerefIndexFields();
    } else {
        indexFields = new ArrayList<IndexField>();

        collectDerefIndexFields(updatedFieldsByScope.get(Scope.NON_VERSIONED), indexFields);

        if (version != -1 && vtagsByVersion.get(version) != null) {
            collectDerefIndexFields(updatedFieldsByScope.get(Scope.VERSIONED), indexFields);
            collectDerefIndexFields(updatedFieldsByScope.get(Scope.VERSIONED_MUTABLE), indexFields);
        }
    }

    // For each indexField, determine the vtags of the referrer that we should consider.
    // In the context of this algorithm, a referrer is each record whose index might contain
    // denormalized data from the record of which we are now processing the change event.
    nextIndexField: for (IndexField indexField : indexFields) {
        DerefValue derefValue = (DerefValue) indexField.getValue();
        FieldType fieldType = derefValue.getTargetField();

        //
        // Determine the vtags of the referrer that we should consider
        //
        Set<String> referrerVtags = indexFieldsVTags.get(indexField);

        // we do not know if the referrer has any versions at all, so always add the versionless tag
        referrerVtags.add(VersionTag.VERSIONLESS_TAG);

        if (fieldType.getScope() == Scope.NON_VERSIONED || event.getType() == RecordEvent.Type.DELETE) {
            // If it is a non-versioned field, then all vtags should be considered.
            // If it is a delete event, we do not know what vtags existed for the record, so consider them all.
            referrerVtags.addAll(indexer.getConf().getVtags());
        } else {
            // Otherwise only the vtags of the created/updated version, if any
            if (version != -1) {
                Set<String> vtags = vtagsByVersion.get(version);
                if (vtags != null)
                    referrerVtags.addAll(vtags);
            }
        }
    }

    // === Case 2 === handle updated/added/removed vtags

    Set<String> changedVTagFields = VersionTag.filterVTagFields(event.getUpdatedFields(), typeManager);
    if (!changedVTagFields.isEmpty()) {
        // In this case, the IndexFields which we need to handle are those that use fields from:
        //  - the previous version to which the vtag pointed (if it is not a new vtag)
        //  - the new version to which the vtag points (if it is not a deleted vtag)
        // But rather than calculating all that (consider the need to retrieve the versions),
        // for now we simply consider all IndexFields.
        // TODO could optimize this to exclude deref fields that use only non-versioned fields?
        for (IndexField indexField : indexer.getConf().getDerefIndexFields()) {
            indexFieldsVTags.get(indexField).addAll(changedVTagFields);
        }
    }

    //
    // Now search the referrers, that is: for each link field, find out which records point to the current record
    // in a certain versioned view (= a certain vtag)
    //

    // This map holds the referrer records to reindex, and for which versions (vtags) they need to be reindexed.
    Map<RecordId, Set<String>> referrersVTags = new HashMap<RecordId, Set<String>>() {
        @Override
        public Set<String> get(Object key) {
            if (!containsKey(key) && key instanceof RecordId) {
                put((RecordId) key, new HashSet<String>());
            }
            return super.get(key);
        }
    };

    int searchedFollowCount = 0;

    // Run over the IndexFields
    nextIndexField: for (Map.Entry<IndexField, Set<String>> entry : indexFieldsVTags.entrySet()) {
        IndexField indexField = entry.getKey();
        Set<String> referrerVTags = entry.getValue();
        DerefValue derefValue = (DerefValue) indexField.getValue();

        // Run over the version tags
        for (String referrerVtag : referrerVTags) {
            List<DerefValue.Follow> follows = derefValue.getFollows();

            Set<RecordId> referrers = new HashSet<RecordId>();
            referrers.add(recordId);

            for (int i = follows.size() - 1; i >= 0; i--) {
                searchedFollowCount++;
                DerefValue.Follow follow = follows.get(i);

                Set<RecordId> newReferrers = new HashSet<RecordId>();

                if (follow instanceof DerefValue.FieldFollow) {
                    String fieldId = ((DerefValue.FieldFollow) follow).getFieldId();
                    for (RecordId referrer : referrers) {
                        try {
                            Set<RecordId> linkReferrers = linkIndex.getReferrers(referrer, referrerVtag,
                                    fieldId);
                            newReferrers.addAll(linkReferrers);
                        } catch (IOException e) {
                            // TODO
                            e.printStackTrace();
                        }
                    }
                } else if (follow instanceof DerefValue.VariantFollow) {
                    DerefValue.VariantFollow varFollow = (DerefValue.VariantFollow) follow;
                    Set<String> dimensions = varFollow.getDimensions();

                    // We need to find out the variants of the current set of referrers which have the
                    // same variant properties as the referrer (= same key/value pairs) and additionally
                    // have the extra dimensions defined in the VariantFollow.

                    nextReferrer: for (RecordId referrer : referrers) {

                        Map<String, String> refprops = referrer.getVariantProperties();

                        // If the referrer already has one of the dimensions, then skip it
                        for (String dimension : dimensions) {
                            if (refprops.containsKey(dimension))
                                continue nextReferrer;
                        }

                        //
                        Set<RecordId> variants;
                        try {
                            variants = repository.getVariants(referrer);
                        } catch (Exception e) {
                            // TODO we should probably throw this higher up and let it be handled there
                            throw new RuntimeException(e);
                        }

                        nextVariant: for (RecordId variant : variants) {
                            Map<String, String> varprops = variant.getVariantProperties();

                            // Check it has each of the variant properties of the current referrer record
                            for (Map.Entry<String, String> refprop : refprops.entrySet()) {
                                if (!ObjectUtils.safeEquals(varprops.get(refprop.getKey()),
                                        refprop.getValue())) {
                                    // skip this variant
                                    continue nextVariant;
                                }
                            }

                            // Check it has the additional dimensions
                            for (String dimension : dimensions) {
                                if (!varprops.containsKey(dimension))
                                    continue nextVariant;
                            }

                            // We have a hit
                            newReferrers.add(variant);
                        }
                    }
                } else if (follow instanceof DerefValue.MasterFollow) {
                    for (RecordId referrer : referrers) {
                        // A MasterFollow can only point to masters
                        if (referrer.isMaster()) {
                            Set<RecordId> variants;
                            try {
                                variants = repository.getVariants(referrer);
                            } catch (RepositoryException e) {
                                // TODO we should probably throw this higher up and let it be handled there
                                throw new RuntimeException(e);
                            } catch (InterruptedException e) {
                                // TODO we should probably throw this higher up and let it be handled there
                                Thread.currentThread().interrupt();
                                throw new RuntimeException(e);
                            }

                            variants.remove(referrer);
                            newReferrers.addAll(variants);
                        }
                    }
                } else {
                    throw new RuntimeException(
                            "Unexpected implementation of DerefValue.Follow: " + follow.getClass().getName());
                }

                referrers = newReferrers;
            }

            for (RecordId referrer : referrers) {
                referrersVTags.get(referrer).add(referrerVtag);
            }
        }
    }

    if (log.isDebugEnabled()) {
        log.debug(String.format(
                "Record %1$s: found %2$s records (times vtags) to be updated because they "
                        + "might contain outdated denormalized data. Checked %3$s follow instances.",
                recordId, referrersVTags.size(), searchedFollowCount));
    }

    //
    // Now re-index all the found referrers
    //
    nextReferrer: for (Map.Entry<RecordId, Set<String>> entry : referrersVTags.entrySet()) {
        RecordId referrer = entry.getKey();
        Set<String> vtagsToIndex = entry.getValue();

        boolean lockObtained = false;
        try {
            indexLocker.lock(referrer);
            lockObtained = true;

            IdRecord record = null;
            try {
                // TODO optimize this: we only need to know the vtags and whether the record has versions
                record = repository.readWithIds(referrer, null, null);
            } catch (Exception e) {
                // TODO handle this
                // One case to be expected here is that the record has been deleted since we read the list of referrers
                e.printStackTrace();
            }

            if (record == null) {
                // The read failed (e.g. the record was deleted since the referrers were collected): skip it.
                continue nextReferrer;
            }

            IndexCase indexCase = indexer.getConf().getIndexCase(record.getRecordTypeName(),
                    record.getId().getVariantProperties());
            if (indexCase == null) {
                continue nextReferrer;
            }

            try {
                if (record.getVersion() == null) {
                    if (indexCase.getIndexVersionless() && vtagsToIndex.contains(VersionTag.VERSIONLESS_TAG)) {
                        indexer.index(record, Collections.singleton(VersionTag.VERSIONLESS_TAG));
                    }
                } else {
                    Map<String, Long> recordVTags = VersionTag.getTagsById(record, typeManager);
                    vtagsToIndex.retainAll(indexCase.getVersionTags());
                    // Only keep vtags which exist on the record
                    vtagsToIndex.retainAll(recordVTags.keySet());
                    indexer.indexRecord(record.getId(), vtagsToIndex, recordVTags);
                }
            } catch (Exception e) {
                // TODO handle this
                e.printStackTrace();
            }
        } catch (IndexLockException e) {
            // TODO handle this
            e.printStackTrace();
        } finally {
            if (lockObtained) {
                indexLocker.unlockLogFailure(referrer);
            }
        }
    }

}

From source file: org.apache.cassandra.db.DirectoriesTest.java

@Test
public void testDiskFreeSpace() {
    DataDirectory[] dataDirectories = new DataDirectory[] { new DataDirectory(new File("/nearlyFullDir1")) {
        public long getAvailableSpace() {
            return 11L;
        }
    }, new DataDirectory(new File("/nearlyFullDir2")) {
        public long getAvailableSpace() {
            return 10L;
        }
    }, new DataDirectory(new File("/uniformDir1")) {
        public long getAvailableSpace() {
            return 1000L;
        }
    }, new DataDirectory(new File("/uniformDir2")) {
        public long getAvailableSpace() {
            return 999L;
        }
    }, new DataDirectory(new File("/veryFullDir")) {
        public long getAvailableSpace() {
            return 4L;
        }
    } };

    // directories should be sorted by their free-space ratio
    // before the weighted random choice is applied
    List<Directories.DataDirectoryCandidate> candidates = getWriteableDirectories(dataDirectories, 0L);
    assertSame(dataDirectories[2], candidates.get(0).dataDirectory); // available: 1000
    assertSame(dataDirectories[3], candidates.get(1).dataDirectory); // available: 999
    assertSame(dataDirectories[0], candidates.get(2).dataDirectory); // available: 11
    assertSame(dataDirectories[1], candidates.get(3).dataDirectory); // available: 10

    // check for writeSize == 5
    Map<DataDirectory, DataDirectory> testMap = new IdentityHashMap<>();
    for (int i = 0;; i++) {
        candidates = getWriteableDirectories(dataDirectories, 5L);
        assertEquals(4, candidates.size());

        DataDirectory dir = Directories.pickWriteableDirectory(candidates);
        testMap.put(dir, dir);

        assertFalse(testMap.size() > 4);
        if (testMap.size() == 4) {
            // at least (rule of thumb) 100 iterations to see whether there are more (wrong) directories returned
            if (i >= 100)
                break;
        }

        // fail if the weighted random writeable-directory choice still has not
        // returned every eligible directory after this many tries
        if (i >= 10000000)
            fail();
    }

    // check for writeSize == 11
    testMap.clear();
    for (int i = 0;; i++) {
        candidates = getWriteableDirectories(dataDirectories, 11L);
        assertEquals(3, candidates.size());
        for (Directories.DataDirectoryCandidate candidate : candidates)
            assertTrue(candidate.dataDirectory.getAvailableSpace() >= 11L);

        DataDirectory dir = Directories.pickWriteableDirectory(candidates);
        testMap.put(dir, dir);

        assertFalse(testMap.size() > 3);
        if (testMap.size() == 3) {
            // at least (rule of thumb) 100 iterations
            if (i >= 100)
                break;
        }

        // fail if the weighted random writeable-directory choice still has not
        // returned every eligible directory after this many tries
        if (i >= 10000000)
            fail();
    }
}
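
Here the IdentityHashMap acts as an identity-based set of the distinct DataDirectory instances returned by the weighted random pick; since every candidate is a unique anonymous instance, reference equality is exactly the membership test needed to count coverage, and the newSetFromMap idiom shown earlier would express the same intent.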