Example usage for java.util IdentityHashMap IdentityHashMap

List of usage examples for java.util IdentityHashMap IdentityHashMap

Introduction

On this page you can find example usage for the java.util.IdentityHashMap constructor IdentityHashMap().

Prototype

public IdentityHashMap() 

Document

Constructs a new, empty identity hash map with a default expected maximum size (21).
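
The no-argument constructor is most often chosen for IdentityHashMap's defining behaviour: keys are compared with reference equality (==) rather than equals(). A minimal sketch, assuming only the JDK; the class name and string literals are illustrative, not taken from the examples below:

import java.util.IdentityHashMap;
import java.util.Map;

public class IdentityHashMapDemo {
    public static void main(String[] args) {
        Map<String, String> map = new IdentityHashMap<String, String>();

        String a = new String("key");
        String b = new String("key"); // equals(a), but a different object

        map.put(a, "first");
        map.put(b, "second");

        // Unlike HashMap, both entries are kept because keys are compared with ==
        System.out.println(map.size()); // prints 2
    }
}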

Usage

From source file:org.apache.hama.bsp.FileInputFormat.java

/**
 * This function identifies and returns the hosts that contribute most for a
 * given split. For calculating the contribution, rack locality is treated on
 * par with host locality, so hosts from racks that contribute the most are
 * preferred over hosts on racks that contribute less
 *
 * @param blkLocations The list of block locations
 * @param offset the start offset of the split within the file
 * @param pSplitSize the requested size of the split, in bytes
 * @return array of hosts that contribute most to this split
 * @throws IOException
 */
protected String[] getSplitHosts(BlockLocation[] blkLocations, long offset, long pSplitSize,
        NetworkTopology clusterMap) throws IOException {
    long splitSize = pSplitSize;
    int startIndex = getBlockIndex(blkLocations, offset);

    long bytesInThisBlock = blkLocations[startIndex].getOffset() + blkLocations[startIndex].getLength()
            - offset;

    // If this is the only block, just return
    if (bytesInThisBlock >= splitSize) {
        return blkLocations[startIndex].getHosts();
    }

    long bytesInFirstBlock = bytesInThisBlock;
    int index = startIndex + 1;
    splitSize -= bytesInThisBlock;

    while (splitSize > 0) {
        bytesInThisBlock = Math.min(splitSize, blkLocations[index++].getLength());
        splitSize -= bytesInThisBlock;
    }

    long bytesInLastBlock = bytesInThisBlock;
    int endIndex = index - 1;

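    // IdentityHashMap compares keys with ==; each Node returned by clusterMap
    // is a single canonical instance, so identity comparison is sufficient here.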
    Map<Node, NodeInfo> hostsMap = new IdentityHashMap<Node, NodeInfo>();
    Map<Node, NodeInfo> racksMap = new IdentityHashMap<Node, NodeInfo>();
    String[] allTopos = new String[0];

    // Build the hierarchy and aggregate the contribution of
    // bytes at each level. See TestGetSplitHosts.java

    for (index = startIndex; index <= endIndex; index++) {

        // Establish the bytes in this block
        if (index == startIndex) {
            bytesInThisBlock = bytesInFirstBlock;
        } else if (index == endIndex) {
            bytesInThisBlock = bytesInLastBlock;
        } else {
            bytesInThisBlock = blkLocations[index].getLength();
        }

        allTopos = blkLocations[index].getTopologyPaths();

        // If no topology information is available, just
        // prefix a fakeRack
        if (allTopos.length == 0) {
            allTopos = fakeRacks(blkLocations, index);
        }

        // NOTE: This code currently works only for one level of
        // hierarchy (rack/host). However, it is relatively easy
        // to extend this to support aggregation at different
        // levels

        for (String topo : allTopos) {

            Node node, parentNode;
            NodeInfo nodeInfo, parentNodeInfo;

            node = clusterMap.getNode(topo);

            if (node == null) {
                node = new NodeBase(topo);
                clusterMap.add(node);
            }

            nodeInfo = hostsMap.get(node);

            if (nodeInfo == null) {
                nodeInfo = new NodeInfo(node);
                hostsMap.put(node, nodeInfo);
                parentNode = node.getParent();
                parentNodeInfo = racksMap.get(parentNode);
                if (parentNodeInfo == null) {
                    parentNodeInfo = new NodeInfo(parentNode);
                    racksMap.put(parentNode, parentNodeInfo);
                }
                parentNodeInfo.addLeaf(nodeInfo);
            } else {
                nodeInfo = hostsMap.get(node);
                parentNode = node.getParent();
                parentNodeInfo = racksMap.get(parentNode);
            }

            nodeInfo.addValue(index, bytesInThisBlock);
            parentNodeInfo.addValue(index, bytesInThisBlock);

        } // for all topos

    } // for all indices

    return identifyHosts(allTopos.length, racksMap);
}

From source file:sf.net.experimaestro.manager.plans.Operator.java

/**
 * Get a simplified view of the plan.
 *
 * @param experiment the experiment for which the map is built
 * @return a map from task operators to their task references
 */
public IdentityHashMap<TaskOperator, TaskReference> getTaskOperatorMap(Experiment experiment) {
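    // keyed by object identity: every distinct TaskOperator instance gets its own entry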
    IdentityHashMap<TaskOperator, TaskReference> map = new IdentityHashMap<>();
    getTaskOperatorMap(experiment, map, null);
    return map;
}

From source file:com.google.gwt.emultest.java.util.IdentityHashMapTest.java

public void testKeySet() {
    IdentityHashMap hashMap = new IdentityHashMap();
    checkEmptyHashMapAssumptions(hashMap);

    Set keySet = hashMap.keySet();
    assertNotNull(keySet);
    assertTrue(keySet.isEmpty());
    assertTrue(keySet.size() == 0);

    hashMap.put(KEY_TEST_KEY_SET, VALUE_TEST_KEY_SET);

    assertTrue(keySet.size() == SIZE_ONE);
    assertTrue(keySet.contains(KEY_TEST_KEY_SET));
    assertFalse(keySet.contains(VALUE_TEST_KEY_SET));
    assertFalse(keySet.contains(KEY_TEST_KEY_SET.toUpperCase(Locale.ROOT)));
}

From source file:ca.uhn.fhir.jpa.term.BaseHapiTerminologySvc.java

@Override
@Transactional(propagation = Propagation.REQUIRED)
public void storeNewCodeSystemVersion(Long theCodeSystemResourcePid, String theSystemUri,
        TermCodeSystemVersion theCodeSystemVersion) {
    ourLog.info("Storing code system");

    ValidateUtil.isTrueOrThrowInvalidRequest(theCodeSystemVersion.getResource() != null,
            "No resource supplied");
    ValidateUtil.isNotBlankOrThrowInvalidRequest(theSystemUri, "No system URI supplied");

    // Grab the existing versions so we can delete them later
    List<TermCodeSystemVersion> existing = myCodeSystemVersionDao
            .findByCodeSystemResource(theCodeSystemResourcePid);

    /*
     * For now we always delete old versions. At some point it would be nice to allow configuration to keep old versions.
     */

    ourLog.info("Deleting old code system versions");
    for (TermCodeSystemVersion next : existing) {
        ourLog.info(" * Deleting code system version {}", next.getPid());
        myConceptParentChildLinkDao.deleteByCodeSystemVersion(next.getPid());
        myConceptDao.deleteByCodeSystemVersion(next.getPid());
    }

    ourLog.info("Flushing...");

    myConceptParentChildLinkDao.flush();
    myConceptDao.flush();

    ourLog.info("Done flushing");

    /*
     * Do the upload
     */

    TermCodeSystem codeSystem = getCodeSystem(theSystemUri);
    if (codeSystem == null) {
        codeSystem = myCodeSystemDao.findByResourcePid(theCodeSystemResourcePid);
        if (codeSystem == null) {
            codeSystem = new TermCodeSystem();
        }
        codeSystem.setResource(theCodeSystemVersion.getResource());
        codeSystem.setCodeSystemUri(theSystemUri);
        myCodeSystemDao.save(codeSystem);
    } else {
        if (!ObjectUtil.equals(codeSystem.getResource().getId(), theCodeSystemVersion.getResource().getId())) {
            String msg = myContext.getLocalizer().getMessage(BaseHapiTerminologySvc.class,
                    "cannotCreateDuplicateCodeSystemUri", theSystemUri,
                    codeSystem.getResource().getIdDt().toUnqualifiedVersionless().getValue());
            throw new UnprocessableEntityException(msg);
        }
    }

    ourLog.info("Validating all codes in CodeSystem for storage (this can take some time for large sets)");

    // Validate the code system
    ArrayList<String> conceptsStack = new ArrayList<String>();
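    // allConcepts is keyed by object identity (==), so the validation pass can
    // track exactly which TermConcept instances it has already visited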
    IdentityHashMap<TermConcept, Object> allConcepts = new IdentityHashMap<TermConcept, Object>();
    int totalCodeCount = 0;
    for (TermConcept next : theCodeSystemVersion.getConcepts()) {
        totalCodeCount += validateConceptForStorage(next, theCodeSystemVersion, conceptsStack, allConcepts);
    }

    ourLog.info("Saving version");

    TermCodeSystemVersion codeSystemVersion = myCodeSystemVersionDao.saveAndFlush(theCodeSystemVersion);

    ourLog.info("Saving code system");

    codeSystem.setCurrentVersion(theCodeSystemVersion);
    codeSystem = myCodeSystemDao.saveAndFlush(codeSystem);

    ourLog.info("Setting codesystemversion on {} concepts...", totalCodeCount);

    for (TermConcept next : theCodeSystemVersion.getConcepts()) {
        populateVersion(next, codeSystemVersion);
    }

    ourLog.info("Saving {} concepts...", totalCodeCount);

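    // same identity-based tracking, this time for the persistence pass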
    IdentityHashMap<TermConcept, Object> conceptsStack2 = new IdentityHashMap<TermConcept, Object>();
    for (TermConcept next : theCodeSystemVersion.getConcepts()) {
        persistChildren(next, codeSystemVersion, conceptsStack2, totalCodeCount);
    }

    ourLog.info("Done saving concepts, flushing to database");

    myConceptDao.flush();
    myConceptParentChildLinkDao.flush();

    ourLog.info("Done deleting old code system versions");

    if (myConceptsToSaveLater.size() > 0 || myConceptLinksToSaveLater.size() > 0) {
        ourLog.info("Note that some concept saving was deferred - still have {} concepts and {} relationships",
                myConceptsToSaveLater.size(), myConceptLinksToSaveLater.size());
    }
}

From source file:com.google.gwt.emultest.java.util.IdentityHashMapTest.java

public void testPut() {
    IdentityHashMap hashMap = new IdentityHashMap();
    checkEmptyHashMapAssumptions(hashMap);

    assertNull(hashMap.put(KEY_TEST_PUT, VALUE_TEST_PUT_1));
    assertEquals(hashMap.put(KEY_TEST_PUT, VALUE_TEST_PUT_2), VALUE_TEST_PUT_1);
    assertNull(hashMap.put(null, VALUE_TEST_PUT_1));
    assertEquals(hashMap.put(null, VALUE_TEST_PUT_2), VALUE_TEST_PUT_1);
}

From source file:org.apache.openjpa.kernel.BrokerImpl.java

@SuppressWarnings("unchecked")
private void initializeOperatingSet() {
    if (_operatingDirty) {
        _operatingDirty = false;
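        // decorating an IdentityHashMap yields a Set whose membership test uses
        // == rather than equals()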
        _operating = MapBackedSet.decorate(new IdentityHashMap<Object, Object>());
    }
}

From source file:org.openmrs.module.ModuleFileParser.java

/**
 * load in extensions
 *
 * @param root the root element of the module's config file
 * @param configVersion the version of the config file format
 * @return map from extension point id to extension class name
 */
private IdentityHashMap<String, String> getExtensions(Element root, String configVersion) {

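    // note: with String keys an IdentityHashMap compares by reference, so two
    // distinct String objects with equal text would be stored as separate entries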
    IdentityHashMap<String, String> extensions = new IdentityHashMap<String, String>();

    NodeList extensionNodes = root.getElementsByTagName("extension");
    if (extensionNodes.getLength() > 0) {
        log.debug("# extensions: " + extensionNodes.getLength());
        int i = 0;
        while (i < extensionNodes.getLength()) {
            Node node = extensionNodes.item(i);
            NodeList nodes = node.getChildNodes();
            int x = 0;
            String point = "", extClass = "";
            while (x < nodes.getLength()) {
                Node childNode = nodes.item(x);
                if ("point".equals(childNode.getNodeName())) {
                    point = childNode.getTextContent().trim();
                } else if ("class".equals(childNode.getNodeName())) {
                    extClass = childNode.getTextContent().trim();
                }
                x++;
            }
            log.debug("point: " + point + " class: " + extClass);

            // point and class are required
            if (point.length() > 0 && extClass.length() > 0) {
                if (point.indexOf(Extension.extensionIdSeparator) != -1) {
                    log.warn("Point id contains illegal character: '" + Extension.extensionIdSeparator + "'");
                } else {
                    extensions.put(point, extClass);
                }
            } else {
                log.warn("'point' and 'class' are required for extensions. Given '" + point + "' and '"
                        + extClass + "'");
            }
            i++;
        }
    }

    return extensions;

}

From source file:com.google.gwt.emultest.java.util.IdentityHashMapTest.java

/**
 * Test method for 'java.util.IdentityHashMap.putAll(Map)'.
 */
public void testPutAll() {
    IdentityHashMap srcMap = new IdentityHashMap();
    checkEmptyHashMapAssumptions(srcMap);

    srcMap.put(KEY_1, VALUE_1);
    srcMap.put(KEY_2, VALUE_2);
    srcMap.put(KEY_3, VALUE_3);

    // Make sure that the data is copied correctly
    IdentityHashMap dstMap = new IdentityHashMap();
    checkEmptyHashMapAssumptions(dstMap);

    dstMap.putAll(srcMap);
    assertEquals(srcMap.size(), dstMap.size());
    assertTrue(dstMap.containsKey(KEY_1));
    assertTrue(dstMap.containsValue(VALUE_1));
    assertFalse(dstMap.containsKey(KEY_1.toUpperCase(Locale.ROOT)));
    assertFalse(dstMap.containsValue(VALUE_1.toUpperCase(Locale.ROOT)));

    assertTrue(dstMap.containsKey(KEY_2));
    assertTrue(dstMap.containsValue(VALUE_2));
    assertFalse(dstMap.containsKey(KEY_2.toUpperCase(Locale.ROOT)));
    assertFalse(dstMap.containsValue(VALUE_2.toUpperCase(Locale.ROOT)));

    assertTrue(dstMap.containsKey(KEY_3));
    assertTrue(dstMap.containsValue(VALUE_3));
    assertFalse(dstMap.containsKey(KEY_3.toUpperCase(Locale.ROOT)));
    assertFalse(dstMap.containsValue(VALUE_3.toUpperCase(Locale.ROOT)));

    // Check that an empty map does not blow away the contents of the
    // destination map
    IdentityHashMap emptyMap = new IdentityHashMap();
    checkEmptyHashMapAssumptions(emptyMap);
    dstMap.putAll(emptyMap);
    assertTrue(dstMap.size() == srcMap.size());

    // Check that putAll overwrites any existing mappings in the destination map
    srcMap.put(KEY_1, VALUE_2);
    srcMap.put(KEY_2, VALUE_3);
    srcMap.put(KEY_3, VALUE_1);

    dstMap.putAll(srcMap);
    assertEquals(dstMap.size(), srcMap.size());
    assertEquals(dstMap.get(KEY_1), VALUE_2);
    assertEquals(dstMap.get(KEY_2), VALUE_3);
    assertEquals(dstMap.get(KEY_3), VALUE_1);

    // Check that putAll adds data but does not remove existing entries

    srcMap.put(KEY_4, VALUE_4);
    dstMap.putAll(srcMap);
    assertEquals(dstMap.size(), srcMap.size());
    assertTrue(dstMap.containsKey(KEY_4));
    assertTrue(dstMap.containsValue(VALUE_4));
    assertEquals(dstMap.get(KEY_1), VALUE_2);
    assertEquals(dstMap.get(KEY_2), VALUE_3);
    assertEquals(dstMap.get(KEY_3), VALUE_1);
    assertEquals(dstMap.get(KEY_4), VALUE_4);

    dstMap.putAll(dstMap);
}

From source file:org.apache.hadoop.mapred.JobInProgress.java

JobInProgress(JobTracker jobtracker, final JobConf default_conf, JobInfo jobInfo, int rCount, Credentials ts)
        throws IOException, InterruptedException {
    try {
        this.restartCount = rCount;
        this.jobId = JobID.downgrade(jobInfo.getJobID());
        String url = "http://" + jobtracker.getJobTrackerMachine() + ":" + jobtracker.getInfoPort()
                + "/jobdetails.jsp?jobid=" + jobId;
        this.jobtracker = jobtracker;
        this.status = new JobStatus(jobId, 0.0f, 0.0f, JobStatus.PREP);
        this.status.setUsername(jobInfo.getUser().toString());
        this.jobtracker.getInstrumentation().addPrepJob(conf, jobId);
        // Add the queue-level metric below (after the profile has been initialized)
        this.startTime = jobtracker.getClock().getTime();
        status.setStartTime(startTime);
        this.localFs = jobtracker.getLocalFileSystem();

        this.tokenStorage = ts;
        // use the user supplied token to add user credentials to the conf
        jobSubmitDir = jobInfo.getJobSubmitDir();
        user = jobInfo.getUser().toString();
        userUGI = UserGroupInformation.createRemoteUser(user);
        if (ts != null) {
            for (Token<? extends TokenIdentifier> token : ts.getAllTokens()) {
                userUGI.addToken(token);
            }
        }

        fs = userUGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
            public FileSystem run() throws IOException {
                return jobSubmitDir.getFileSystem(default_conf);
            }
        });

        /** check for the size of jobconf **/
        Path submitJobFile = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
        FileStatus fstatus = fs.getFileStatus(submitJobFile);
        if (fstatus.getLen() > jobtracker.MAX_JOBCONF_SIZE) {
            throw new IOException("Exceeded max jobconf size: " + fstatus.getLen() + " limit: "
                    + jobtracker.MAX_JOBCONF_SIZE);
        }
        this.localJobFile = default_conf.getLocalPath(JobTracker.SUBDIR + "/" + jobId + ".xml");
        Path jobFilePath = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
        jobFile = jobFilePath.toString();
        fs.copyToLocalFile(jobFilePath, localJobFile);
        conf = new JobConf(localJobFile);
        if (conf.getUser() == null) {
            this.conf.setUser(user);
        }
        if (!conf.getUser().equals(user)) {
            String desc = "The username " + conf.getUser() + " obtained from the "
                    + "conf doesn't match the username " + user + " the user " + "authenticated as";
            AuditLogger.logFailure(user, Operation.SUBMIT_JOB.name(), conf.getUser(), jobId.toString(), desc);
            throw new IOException(desc);
        }

        this.priority = conf.getJobPriority();
        this.status.setJobPriority(this.priority);
        String queueName = conf.getQueueName();
        this.profile = new JobProfile(user, jobId, jobFile, url, conf.getJobName(), queueName);

        Queue queue = this.jobtracker.getQueueManager().getQueue(queueName);
        if (queue == null) {
            throw new IOException("Queue \"" + queueName + "\" does not exist");
        }
        this.queueMetrics = queue.getMetrics();
        this.queueMetrics.addPrepJob(conf, jobId);

        this.submitHostName = conf.getJobSubmitHostName();
        this.submitHostAddress = conf.getJobSubmitHostAddress();
        this.numMapTasks = conf.getNumMapTasks();
        this.numReduceTasks = conf.getNumReduceTasks();

        this.memoryPerMap = conf.getMemoryForMapTask();
        this.memoryPerReduce = conf.getMemoryForReduceTask();

        this.taskCompletionEvents = new ArrayList<TaskCompletionEvent>(numMapTasks + numReduceTasks + 10);

        // Construct the jobACLs
        status.setJobACLs(jobtracker.getJobACLsManager().constructJobACLs(conf));

        this.mapFailuresPercent = conf.getMaxMapTaskFailuresPercent();
        this.reduceFailuresPercent = conf.getMaxReduceTaskFailuresPercent();

        this.maxTaskFailuresPerTracker = conf.getMaxTaskFailuresPerTracker();

        hasSpeculativeMaps = conf.getMapSpeculativeExecution();
        hasSpeculativeReduces = conf.getReduceSpeculativeExecution();
        // a limit on the input size of the reduce.
        // we check to see if the estimated input size
        // of each reduce is less than this value. If not
        // we fail the job. A value of -1 just means there is no
        // limit set.
        reduce_input_limit = -1L;
        this.maxLevel = jobtracker.getNumTaskCacheLevels();
        this.anyCacheLevel = this.maxLevel + 1;
        this.nonLocalMaps = new LinkedList<TaskInProgress>();
        this.failedMaps = new TreeSet<TaskInProgress>(failComparator);
        this.nonLocalRunningMaps = new LinkedHashSet<TaskInProgress>();
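        // cache keyed by Node identity (keys compared with ==, not equals())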
        this.runningMapCache = new IdentityHashMap<Node, Set<TaskInProgress>>();
        this.nonRunningReduces = new TreeSet<TaskInProgress>(failComparator);
        this.runningReduces = new LinkedHashSet<TaskInProgress>();
        this.resourceEstimator = new ResourceEstimator(this);
        this.reduce_input_limit = conf.getLong("mapreduce.reduce.input.limit", DEFAULT_REDUCE_INPUT_LIMIT);
        // register job's tokens for renewal
        DelegationTokenRenewal.registerDelegationTokensForRenewal(jobInfo.getJobID(), ts, jobtracker.getConf());

        // Check task limits
        checkTaskLimits();
    } finally {
        //close all FileSystems that were created above for the current user
        //At this point, this constructor is called in the context of an RPC, and
        //hence the "current user" is actually referring to the kerberos
        //authenticated user (if security is ON).
        FileSystem.closeAllForUGI(UserGroupInformation.getCurrentUser());
    }
}

From source file:de.javakaffee.web.msm.integration.TestUtils.java

public static void assertDeepEquals(final Object one, final Object another) {
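    // the IdentityHashMap presumably tracks objects already compared, by
    // reference, so cyclic object graphs do not recurse forever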
    assertDeepEquals(one, another, new IdentityHashMap<Object, Object>());
}