Example usage for java.util TreeMap get

List of usage examples for java.util TreeMap get

Introduction

On this page you can find example usages of java.util.TreeMap#get.

Prototype

public V get(Object key) 

Source Link

Document

Returns the value to which the specified key is mapped, or null if this map contains no mapping for the key.

Usage

From source file:laboGrid.graphs.replication.ReplicationGraphHeuristicGenerator.java

/**
 * Computes a replication graph for the given DAs using a peer-aware heuristic.
 *
 * DAs that host at least one sublattice are grouped by their hosting peer.
 * When only a single peer is present the heuristic has nothing to exploit and
 * the naive generator is used instead; otherwise the heuristic graph is built
 * from the per-peer groups.
 *
 * @param das the DA identifiers
 * @param cGraph mapping from DAs to the sublattices they host
 * @param backupDegree the requested backup degree
 * @return the computed replication graph
 */
public ReplicationGraph computeReplicationGraph(DAId[] das, GraphMapping cGraph, int backupDegree) {

    // Group the indices of usable DAs by the peer that hosts them.
    TreeMap<String, Set<Integer>> dynPeers = new TreeMap<String, Set<Integer>>();
    Set<Integer>[] da2Sub = cGraph.getDa2Sub();
    for (int i = 0; i < das.length; ++i) {
        // Only DAs hosting at least one sublattice take part in the backup graph.
        if (da2Sub[i] != null && !da2Sub[i].isEmpty()) {
            // NOTE(review): the peer id is hard-coded; the original intent was
            // presumably das[i].getPeerId() (left commented out upstream) — confirm.
            String resourcePeer = "peerId";
            Set<Integer> resources = dynPeers.get(resourcePeer);
            if (resources == null) {
                System.out.println("Detected Peer: " + resourcePeer + ".");
                resources = new TreeSet<Integer>();
                dynPeers.put(resourcePeer, resources);
            }
            resources.add(i);
        }
    }

    if (dynPeers.size() == 1) {
        // A single peer gives the heuristic nothing to work with; fall back.
        ReplicationGraphNaiveGenerator naiveGen = new ReplicationGraphNaiveGenerator();
        return naiveGen.computeReplicationGraph(das, cGraph, backupDegree);
    }

    // Convert the dynamic structure into the static array form expected by
    // the replication algorithm. The array creation is necessarily raw.
    @SuppressWarnings("unchecked")
    Set<Integer>[] peers = new TreeSet[dynPeers.size()];
    Iterator<Entry<String, Set<Integer>>> it = dynPeers.entrySet().iterator();
    for (int i = 0; i < peers.length; ++i) {
        peers[i] = it.next().getValue();
    }

    return new ReplicationGraph(replicationGraph(das.length, backupDegree, peers));
}

From source file:org.apache.hadoop.hbase.master.balancer.BalancerTestBase.java

/**
 * Checks that no two replicas of the same region are hosted on the same host,
 * and, when a rack manager is supplied, that none share the same rack either.
 *
 * @param serverMap regions currently assigned to each server
 * @param rackManager resolves a server to its rack; may be null to skip the rack check
 */
public void assertRegionReplicaPlacement(Map<ServerName, List<HRegionInfo>> serverMap,
        RackManager rackManager) {
    TreeMap<String, Set<HRegionInfo>> regionsPerHost = new TreeMap<String, Set<HRegionInfo>>();
    TreeMap<String, Set<HRegionInfo>> regionsPerRack = new TreeMap<String, Set<HRegionInfo>>();

    for (Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) {
        String hostname = entry.getKey().getHostname();
        Set<HRegionInfo> infos = getOrCreate(regionsPerHost, hostname);
        addPrimaries(entry.getValue(), infos,
                "Two or more region replicas are hosted on the same host after balance");
    }

    if (rackManager == null) {
        return; // no rack information available; the host check is all we can do
    }

    for (Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) {
        String rack = rackManager.getRack(entry.getKey());
        Set<HRegionInfo> infos = getOrCreate(regionsPerRack, rack);
        addPrimaries(entry.getValue(), infos,
                "Two or more region replicas are hosted on the same rack after balance");
    }
}

/** Returns the set mapped to {@code key}, creating and registering an empty one if absent. */
private static Set<HRegionInfo> getOrCreate(TreeMap<String, Set<HRegionInfo>> map, String key) {
    Set<HRegionInfo> infos = map.get(key);
    if (infos == null) {
        infos = new HashSet<HRegionInfo>();
        map.put(key, infos);
    }
    return infos;
}

/**
 * Adds the default-replica form of each region to {@code infos}; a duplicate
 * means two replicas of the same region share the group, which fails the test
 * with {@code failureMessage}.
 */
private static void addPrimaries(List<HRegionInfo> regions, Set<HRegionInfo> infos,
        String failureMessage) {
    for (HRegionInfo info : regions) {
        HRegionInfo primaryInfo = RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
        if (!infos.add(primaryInfo)) {
            Assert.fail(failureMessage);
        }
    }
}

From source file:org.apache.hadoop.hive.ql.exec.ComputationBalancerReducer.java

/**
 * Accumulates one mapper output record into the reducer's statistics.
 *
 * The original key encodes which statistic the value belongs to; its prefix
 * selects the branch: field lengths and null counters are summed per key in
 * {@code infoDict}, record/sample counters are summed into scalar fields,
 * sampled rows are collected into {@code _testList}, and MCV (most common
 * value) frequencies are merged into {@code mcvList} and compacted when a
 * key accumulates more than 512 distinct values.
 *
 * @param wrappedKey the wrapped statistic key
 * @param wrappedValue the textual payload for that key
 */
public void reduce(BytesWritable wrappedKey, Text wrappedValue) {
    final String _key = ToolBox.getOriginalKey(wrappedKey);

    if (_key.startsWith(StatsCollectionOperator.FIELDLENGTH_ATTR)) {
        Integer total = accumulateInfoDict(_key, Integer.valueOf(wrappedValue.toString()));
        LOG.debug("FieldLength:  " + _key + " " + total);

    } else if (_key.startsWith(StatsCollectionOperator.NULLCOUNTER_ATTR)) {
        Integer total = accumulateInfoDict(_key, Integer.valueOf(wrappedValue.toString()));
        LOG.debug("NullCounter:  " + _key + " " + total);

    } else if (_key.startsWith(StatsCollectionOperator.RECORDSNUM_ATTR)) {
        stat_num_records += Integer.valueOf(wrappedValue.toString());

    } else if (_key.startsWith(SampleOperator.SAMPLE_COUNTER_ATTR)) {
        _sampledRecordNumber_ += Integer.valueOf(wrappedValue.toString());

    } else if (_key.startsWith(SampleOperator.SAMPLE_DATA_ATTR)) {
        _testList.add(wrappedValue);

    } else if (_key.startsWith(HistogramOperator.MCVLIST_ATTR)) {
        if (mcvList == null) {
            return; // MCV collection disabled
        }
        // Value payload is "<value><delim><frequency>".
        StringTokenizer _hst_ = new StringTokenizer(wrappedValue.toString(), ToolBox.hiveDelimiter);
        String _true_value_ = _hst_.nextToken();
        String _true_fre_ = _hst_.nextToken();
        TreeMap<String, Integer> _valfre_map_ = mcvList.get(_key);

        if (_valfre_map_ == null) {
            _valfre_map_ = new TreeMap<String, Integer>();
            _valfre_map_.put(_true_value_, Integer.valueOf(_true_fre_));
            mcvList.put(_key, _valfre_map_);
        } else {
            // Merge this frequency into the existing per-value count.
            Integer _o_fre_ = _valfre_map_.get(_true_value_);
            if (_o_fre_ == null) {
                _o_fre_ = Integer.valueOf(0);
            }
            _o_fre_ += Integer.valueOf(_true_fre_);
            _valfre_map_.put(_true_value_, _o_fre_);
        }

        // Bound memory: keep only the 512 most frequent values per key.
        if (_valfre_map_.keySet().size() > 512) {
            ToolBox _tb = new ToolBox();
            _tb.compact(_valfre_map_, ToolBox.SortMethod.DescendSort, Integer.valueOf(512));
        }

    } else if (_key.startsWith(SampleOperator.STATISTICS_SAMPLING_FACTOR_ATTR)) {
        // FIX: the original parsed wrappedKey.toString(), which renders the raw
        // BytesWritable rather than the numeric payload; every other numeric
        // branch parses the value, so do the same here.
        stat_sampled_counter += Integer.valueOf(wrappedValue.toString());
    }
}

/**
 * Adds {@code delta} to the running total stored in {@code infoDict} under
 * {@code key} (a missing entry counts as zero) and returns the new total.
 */
private Integer accumulateInfoDict(String key, Integer delta) {
    Integer total = infoDict.get(key);
    total = (total == null) ? delta : total + delta;
    infoDict.put(key, total);
    return total;
}

From source file:de.tudarmstadt.ukp.experiments.dip.wp1.documents.Step9AgreementCollector.java

/**
 * Computes the observed inter-annotator agreement (via a Fleiss' kappa coding
 * study) over the MTurk sentence-relevance votes of every non-empty, annotated
 * ranked result in each gold-data query container, stores the value on the
 * result, and writes the updated container to {@code outputDir}.
 *
 * @param goldDataFolder folder holding the query container XML files
 * @param outputDir directory the updated containers are written to
 * @throws Exception on I/O, XML or XMI deserialization failures
 */
@SuppressWarnings("unchecked")
public static void computeObservedAgreement(File goldDataFolder, File outputDir) throws Exception {
    // iterate over query containers
    for (File f : FileUtils.listFiles(goldDataFolder, new String[] { "xml" }, false)) {
        QueryResultContainer queryResultContainer = QueryResultContainer
                .fromXML(FileUtils.readFileToString(f, "utf-8"));

        for (QueryResultContainer.SingleRankedResult rankedResult : queryResultContainer.rankedResults) {

            // only non-empty and annotated results are evaluated
            if (rankedResult.plainText != null && !rankedResult.plainText.isEmpty()) {
                if (rankedResult.mTurkRelevanceVotes.isEmpty()) {
                    System.err.println("No annotations found for document: " + "clueWebID: "
                            + rankedResult.clueWebID + ", queryID: " + queryResultContainer.qID);
                } else {

                    // first, get all the sentence IDs from the stored XMI.
                    // FIX: decode with java.util.Base64's MIME decoder (tolerates
                    // line breaks) instead of the deprecated internal
                    // sun.misc.BASE64Decoder.
                    byte[] bytes = java.util.Base64.getMimeDecoder().decode(rankedResult.originalXmi);

                    JCas jCas = JCasFactory.createJCas();
                    XmiCasDeserializer.deserialize(new ByteArrayInputStream(bytes), jCas.getCas());

                    // for each sentence, we'll collect all its annotations
                    TreeMap<Integer, SortedMap<String, String>> sentencesAndRelevanceAnnotations = collectSentenceIDs(
                            jCas);

                    // now we fill the map with the mturk annotations;
                    // the true/false list per sentence stays consistent (annotator ordering remains)
                    for (QueryResultContainer.MTurkRelevanceVote mTurkRelevanceVote : rankedResult.mTurkRelevanceVotes) {
                        for (QueryResultContainer.SingleSentenceRelevanceVote sentenceRelevanceVote : mTurkRelevanceVote.singleSentenceRelevanceVotes) {

                            String sentenceIDString = sentenceRelevanceVote.sentenceID;
                            if (sentenceIDString == null || sentenceIDString.isEmpty()) {
                                throw new IllegalStateException("Empty sentence ID for turker "
                                        + mTurkRelevanceVote.turkID + ", HIT: " + mTurkRelevanceVote.hitID
                                        + ", clueWebID: " + rankedResult.clueWebID + ", queryID: "
                                        + queryResultContainer.qID);
                            } else {

                                Integer sentenceIDInt = Integer.valueOf(sentenceIDString);
                                String value = sentenceRelevanceVote.relevant;

                                // sanity check first
                                // NOTE(review): get() would NPE if a vote references a
                                // sentence ID absent from the XMI — confirm upstream guarantees.
                                if (sentencesAndRelevanceAnnotations.get(sentenceIDInt)
                                        .containsKey(mTurkRelevanceVote.turkID)) {
                                    System.err.println("Annotations for sentence " + sentenceIDInt
                                            + " for turker " + mTurkRelevanceVote.turkID + " are duplicate");
                                }

                                sentencesAndRelevanceAnnotations.get(sentenceIDInt)
                                        .put(mTurkRelevanceVote.turkID, value);
                            }
                        }
                    }

                    // we collect only the "clean" sentences: non-empty and fully annotated
                    Map<Integer, SortedMap<String, String>> cleanSentencesAndRelevanceAnnotations = new HashMap<>();

                    // sanity check -- all sentences are covered with the same number of annotations
                    for (Map.Entry<Integer, SortedMap<String, String>> entry : sentencesAndRelevanceAnnotations
                            .entrySet()) {
                        SortedMap<String, String> singleSentenceAnnotations = entry.getValue();

                        // remove empty sentences
                        if (singleSentenceAnnotations.values().isEmpty()) {
                            System.err.println("Empty annotations for sentence, " + "sentenceID: "
                                    + entry.getKey() + ", " + "clueWebID: " + rankedResult.clueWebID
                                    + ", queryID: " + queryResultContainer.qID + "; number of assignments: "
                                    + singleSentenceAnnotations.values().size() + ", expected: "
                                    + NUMBER_OF_TURKERS_PER_HIT + ". Sentence will be skipped in evaluation");
                        } else if (singleSentenceAnnotations.values().size() != NUMBER_OF_TURKERS_PER_HIT) {
                            System.err.println("Inconsistent annotations for sentences, " + "sentenceID: "
                                    + entry.getKey() + ", " + "clueWebID: " + rankedResult.clueWebID
                                    + ", queryID: " + queryResultContainer.qID + "; number of assignments: "
                                    + singleSentenceAnnotations.values().size() + ", expected: "
                                    + NUMBER_OF_TURKERS_PER_HIT + ". Sentence will be skipped in evaluation");
                        } else {
                            cleanSentencesAndRelevanceAnnotations.put(entry.getKey(), entry.getValue());
                        }
                    }

                    // fill the annotation study
                    CodingAnnotationStudy study = new CodingAnnotationStudy(NUMBER_OF_TURKERS_PER_HIT);
                    study.addCategory("true");
                    study.addCategory("false");

                    for (SortedMap<String, String> singleSentenceAnnotations : cleanSentencesAndRelevanceAnnotations
                            .values()) {
                        // only non-empty, fully annotated sentences
                        Collection<String> values = singleSentenceAnnotations.values();
                        if (!values.isEmpty() && values.size() == NUMBER_OF_TURKERS_PER_HIT) {
                            study.addItemAsArray(values.toArray());
                        }
                    }

                    // Fleiss' multi-pi.
                    FleissKappaAgreement fleissKappaAgreement = new FleissKappaAgreement(study);

                    double percentage;
                    try {
                        percentage = fleissKappaAgreement.calculateObservedAgreement();
                    } catch (InsufficientDataException ex) {
                        // dkpro-statistics feature, see https://github.com/dkpro/dkpro-statistics/issues/24
                        percentage = 1.0;
                    }

                    if (!Double.isNaN(percentage)) {
                        rankedResult.observedAgreement = percentage;
                    } else {
                        System.err.println("Observed agreement is NaN.");
                    }
                }
            }
        }

        // and save the query to output dir
        File outputFile = new File(outputDir, queryResultContainer.qID + ".xml");
        FileUtils.writeStringToFile(outputFile, queryResultContainer.toXML(), "utf-8");
        System.out.println("Finished " + outputFile);
    }
}

From source file:com.sfs.whichdoctor.analysis.AgedDebtorsAnalysisDAOImpl.java

/**
 * Post process the aged debtors groupings.
 *
 * @param groupings the groupings/* w w w .j  a v  a  2 s  .  co m*/
 * @param showZeroBalances the show zero balances
 * @return the tree map
 */
private TreeMap<String, AgedDebtorsGrouping> processGroups(final TreeMap<String, AgedDebtorsGrouping> groupings,
        final boolean showZeroBalances) {

    TreeMap<String, AgedDebtorsGrouping> processedGroupings = new TreeMap<String, AgedDebtorsGrouping>();

    for (String groupName : groupings.keySet()) {

        AgedDebtorsGrouping group = groupings.get(groupName);

        TreeMap<String, AgedDebtorsRecord> records = new TreeMap<String, AgedDebtorsRecord>();

        for (String orderKey : group.getRecords().keySet()) {

            AgedDebtorsRecord record = group.getRecords().get(orderKey);

            if (showZeroBalances || record.getTotal() != 0) {
                records.put(orderKey, record);

                for (int id : record.getPeriodBreakdown().keySet()) {
                    AgedDebtorsPeriod period = record.getPeriodBreakdown().get(id);
                    AgedDebtorsPeriod groupPeriod = group.getPeriodBreakdown(period);

                    // Update the running totals for the period of the grouping
                    groupPeriod.setOutstandingDebitValue(
                            groupPeriod.getOutstandingDebitValue() + period.getOutstandingDebitValue());

                    groupPeriod.setUnallocatedRefundValue(
                            groupPeriod.getUnallocatedRefundValue() + period.getUnallocatedRefundValue());

                    groupPeriod.setUnallocatedCreditValue(
                            groupPeriod.getUnallocatedCreditValue() + period.getUnallocatedCreditValue());

                    groupPeriod.setUnallocatedReceiptValue(
                            groupPeriod.getUnallocatedReceiptValue() + period.getUnallocatedReceiptValue());

                    group.getPeriodBreakdown().put(id, groupPeriod);
                }

                // Update the running totals for the grouping
                group.setOutstandingDebitValue(
                        group.getOutstandingDebitValue() + record.getOutstandingDebitValue());

                group.setUnallocatedRefundValue(
                        group.getUnallocatedRefundValue() + record.getUnallocatedRefundValue());

                group.setUnallocatedCreditValue(
                        group.getUnallocatedCreditValue() + record.getUnallocatedCreditValue());

                group.setUnallocatedReceiptValue(
                        group.getUnallocatedReceiptValue() + record.getUnallocatedReceiptValue());
            }
        }
        group.setRecords(records);
        processedGroupings.put(groupName, group);
    }
    return processedGroupings;
}

From source file:eu.sisob.uma.crawler.AirResearchersWebPagesExtractor.java

/**
 * Crawls the department web pages of a unit of assessment looking for the
 * personal web pages of its researchers, and records every page found as a
 * RESEARCHER_WEB_ADDRESS element on the matching researcher node.
 *
 * @param elementUnitOfAssessment XML element describing the unit of assessment
 * @param path node path (not used by this implementation)
 * @param sInstitutionName name of the institution
 * @param sWebAddress the institution's web address
 * @param sUnitOfAssessment_Description description of the unit of assessment
 * @return true, unless execution was refused before starting
 */
@Override
protected boolean actionsInUnitOfAssessmentNode(org.dom4j.Element elementUnitOfAssessment, String path,
        String sInstitutionName, String sWebAddress, String sUnitOfAssessment_Description) {
    if (refuseExecution)
        return false;

    String crawler_data_folder = this.work_dir + File.separator + CRAWLER_DATA_FOLDERNAME;

    List<String> department_web_addresses = new ArrayList<String>();
    List<ResearcherNameInfo> researchers = new ArrayList<ResearcherNameInfo>();

    // Derive the "stay on this site" pattern from the institution's address,
    // e.g. "http://www.example.ac.uk/x" -> "example.ac.uk".
    String seed = sWebAddress;
    String contain_pattern = seed.replace("http://www.", "");
    int index = contain_pattern.indexOf("/");
    if (index == -1)
        index = contain_pattern.length() - 1;
    contain_pattern = contain_pattern.substring(0, index);

    /*
     * Collect the department web pages in which to search for researcher pages.
     */
    for (Iterator<org.dom4j.Element> department_web_address_it = elementUnitOfAssessment
            .elementIterator(XMLTags.DEPARTMENT_WEB_ADDRESS); department_web_address_it.hasNext();) {
        org.dom4j.Element department_web_address_element = (org.dom4j.Element) department_web_address_it.next();
        if (!department_web_address_element.getText().equals(""))
            department_web_addresses.add(department_web_address_element.getText());
    }

    /*
     * If there is no department web page, fall back to the university web page
     * to find a staff page or similar.
     * NOTE(review): the log below says "Adding", but the add is commented out,
     * so the fallback seed is never actually used — confirm intent.
     */
    if (department_web_addresses.isEmpty()) {
        ProjectLogger.LOGGER.info("There is not dept webpages for [" + sUnitOfAssessment_Description + " - "
                + sInstitutionName + "]. Adding " + sWebAddress);
        //department_web_addresses.add(sWebAddress);
    }

    /*
     * Collect the researcher name information used to recognize their web pages.
     */
    for (Iterator<org.dom4j.Element> research_group_it = elementUnitOfAssessment
            .elementIterator(XMLTags.RESEARCHGROUP); research_group_it.hasNext();) {
        org.dom4j.Element research_group_element = research_group_it.next();

        for (Iterator<org.dom4j.Element> reseacher_it = research_group_element
                .elementIterator(XMLTags.RESEARCHER); reseacher_it.hasNext();) {
            org.dom4j.Element reseacher_element = reseacher_it.next();

            String initials = reseacher_element.element(XMLTags.RESEARCHER_INITIALS).getText();
            String last_name = reseacher_element.element(XMLTags.RESEARCHER_LASTNAME).getText();
            String first_name = reseacher_element.element(XMLTags.RESEARCHER_FIRSTNAME) == null ? ""
                    : reseacher_element.element(XMLTags.RESEARCHER_FIRSTNAME).getText();
            String whole_name = reseacher_element.element(XMLTags.RESEARCHER_NAME) == null ? ""
                    : reseacher_element.element(XMLTags.RESEARCHER_NAME).getText();

            ResearcherNameInfo rsi = new ResearcherNameInfo(last_name, initials, first_name, whole_name);
            researchers.add(rsi);
        }
    }

    if (researchers.size() > 0 && !department_web_addresses.isEmpty()) {
        /*
         * Crawl the department pages searching for the researchers.
         */
        CrawlerResearchesPagesV3Controller controllerReseachers = null;
        try {
            // Per-institution/subject working folder; wiped before each run.
            String university_subject_crawler_data_folder = crawler_data_folder + File.separator
                    + sInstitutionName.replaceAll("\\W+", "").toLowerCase() + "-"
                    + sUnitOfAssessment_Description.replaceAll("\\W+", "").toLowerCase() + "-crawler-data";
            File university_subject_crawler_data_dir = new File(university_subject_crawler_data_folder);
            if (university_subject_crawler_data_dir.exists())
                FileFootils.deleteDir(university_subject_crawler_data_dir);

            controllerReseachers = new CrawlerResearchesPagesV3Controller(
                    university_subject_crawler_data_folder, this.keywords_data_dir, researchers);
            // NOTE(review): sSeeds is built but never read — dead accumulator.
            String sSeeds = "";
            for (String s : department_web_addresses) {
                controllerReseachers.addSeed(s);
                sSeeds += s + ",";
            }

            controllerReseachers.setPolitenessDelay(200);
            controllerReseachers.setMaximumCrawlDepth(3);
            controllerReseachers.setMaximumPagesToFetch(-1);
            controllerReseachers.setContainPattern(contain_pattern);
            controllerReseachers.clearInterestingUrlsDetected();

            ProjectLogger.LOGGER.info("Begin crawling: " + sUnitOfAssessment_Description + " - "
                    + sInstitutionName + " - [" + StringUtils.join(department_web_addresses, ",") + "]");
            long lTimerAux = java.lang.System.currentTimeMillis();

            controllerReseachers.start(CrawlerResearchesPagesV3.class, 1);

            controllerReseachers.postProcessResults();

            lTimerAux = java.lang.System.currentTimeMillis() - lTimerAux;
            ProjectLogger.LOGGER.info(
                    "End crawling: " + sUnitOfAssessment_Description + " - " + sInstitutionName + " - Time: "
                            + lTimerAux + " ms - [" + StringUtils.join(department_web_addresses, ",") + "]");
        } catch (Exception ex) {
            ProjectLogger.LOGGER.error(ex.getMessage(), ex);
        } finally {
            if (CrawlerTrace.isTraceUrlsActive() && controllerReseachers != null)
                controllerReseachers.closeCrawlerTrace();
        }

        /*
         * Update the XML document with the crawl results.
         */
        if (controllerReseachers != null) {
            /*
             * Optionally dump the detected URLs grouped by type.
             */
            if (CrawlerTrace.isTraceSearchActive()) {
                CandidateTypeURL
                        .printResults(
                                "Results of: " + sUnitOfAssessment_Description + " - " + sInstitutionName + " ("
                                        + sWebAddress + ") by TYPE",
                                controllerReseachers.getInterestingUrlsDetected());
            }

            // counter[0] slots track per-unit totals/successes for this run.
            counterTotal[0] = 0;
            counterSuccess[0] = 0;

            try {
                /*
                 * Attach a RESEARCHER_WEB_ADDRESS element to every researcher
                 * whose canonicalized name matches a detected URL.
                 */
                for (Iterator<org.dom4j.Element> research_group_it = elementUnitOfAssessment
                        .elementIterator(XMLTags.RESEARCHGROUP); research_group_it.hasNext();) {
                    org.dom4j.Element research_group_element = research_group_it.next();

                    for (Iterator<org.dom4j.Element> researcher_it = research_group_element
                            .elementIterator(XMLTags.RESEARCHER); researcher_it.hasNext();) {
                        counterTotal[0]++;
                        org.dom4j.Element researcher_element = researcher_it.next();

                        String initials = researcher_element.element(XMLTags.RESEARCHER_INITIALS).getText();
                        String last_name = researcher_element.element(XMLTags.RESEARCHER_LASTNAME).getText();
                        String first_name = researcher_element.element(XMLTags.RESEARCHER_FIRSTNAME) == null
                                ? ""
                                : researcher_element.element(XMLTags.RESEARCHER_FIRSTNAME).getText();
                        String whole_name = researcher_element.element(XMLTags.RESEARCHER_NAME) == null ? ""
                                : researcher_element.element(XMLTags.RESEARCHER_NAME).getText();

                        // Canonicalize all name parts before matching against crawl data.
                        ResearcherNameInfo researcher_name_info = new ResearcherNameInfo(last_name, initials,
                                first_name, whole_name);
                        researcher_name_info.first_name = CandidateTypeURL
                                .getCanonicalName(researcher_name_info.first_name);
                        researcher_name_info.last_name = CandidateTypeURL
                                .getCanonicalName(researcher_name_info.last_name);
                        researcher_name_info.initial = CandidateTypeURL
                                .getCanonicalName(researcher_name_info.initial);
                        researcher_name_info.whole_name = CandidateTypeURL
                                .getCanonicalName(researcher_name_info.whole_name);

                        TreeMap<String, List<CandidateTypeURL>> t = controllerReseachers
                                .getInterestingUrlsDetected();

                        List<CandidateTypeURL> lst = t
                                .get(CrawlerResearchesPagesV3Controller.RESEARCHER_RESULT_TAG);

                        boolean bExist = false;
                        if (lst != null) {
                            //FIXME(review): a contains-then-remove scheme would avoid rescanning the list.
                            // NOTE(review): lock1 is written but never read — dead flag.
                            boolean lock1 = true;
                            for (CandidateTypeURL ss : lst) {
                                if (researcher_name_info.equals(ss.data)) {
                                    ProjectLogger.LOGGER.info("Add researcher '" + researcher_name_info
                                            + "' the url '" + ss.sURL + "'");
                                    researcher_element.addElement(XMLTags.RESEARCHER_WEB_ADDRESS)
                                            .addAttribute(XMLTags.RESEARCHER_WEB_ADDRESS_ATTR_TYPE, ss.sSubType)
                                            .addAttribute(XMLTags.RESEARCHER_WEB_ADDRESS_ATTR_EXT, ss.sExt)
                                            .addText(ss.sURL);
                                    lock1 = false;
                                    bExist = true;
                                }
                            }
                        }
                        if (bExist) {
                            counterSuccess[0]++;
                        } else {
                            ProjectLogger.LOGGER.warn("No webpage for " + researcher_name_info);
                        }
                    }
                }
            } catch (Exception ex) {
                ProjectLogger.LOGGER.error("Error", ex);
            }

            /*
             * Log a short summary and roll this unit's counts into the
             * per-unit ([1]) and global ([2]) counters.
             */
            ProjectLogger.LOGGER.info("Researches results: " + sInstitutionName + " - "
                    + sUnitOfAssessment_Description + " - " + counterSuccess[0] + " / " + counterTotal[0]);
            counterTotal[1] += 1;
            counterSuccess[1] += counterSuccess[0] > 0 ? 1 : 0;

            counterSuccess[2] += counterSuccess[0];
            counterTotal[2] += counterTotal[0];
        }
    }

    return true;
}

From source file:org.apache.openmeetings.web.room.wb.WbPanel.java

/**
 * Loads the room's whiteboards for the client. On first access (the whiteboard
 * manager does not yet know the room) it pre-populates whiteboards from the
 * room's attached files, one whiteboard per file index, under the manager's
 * room lock. It then appends the JavaScript that activates the current
 * whiteboard, restores its slide and loads its videos.
 *
 * @param sb script buffer the WbArea commands are appended to
 */
@Override
void internalWbLoad(StringBuilder sb) {
    Long langId = rp.getClient().getUser().getLanguageId();
    if (!wbm.contains(roomId) && rp.getRoom().getFiles() != null && !rp.getRoom().getFiles().isEmpty()) {
        if (wbm.tryLock(roomId)) {
            try {
                // Group the room files by whiteboard index; computeIfAbsent
                // replaces the original get/put/get round trip.
                TreeMap<Long, List<BaseFileItem>> files = new TreeMap<>();
                for (RoomFile rf : rp.getRoom().getFiles()) {
                    files.computeIfAbsent(rf.getWbIdx(), k -> new ArrayList<>()).add(rf.getFile());
                }
                Whiteboards _wbs = wbm.get(roomId, langId);
                for (Map.Entry<Long, List<BaseFileItem>> e : files.entrySet()) {
                    Whiteboard wb = wbm.add(roomId, langId);
                    _wbs.setActiveWb(wb.getId());
                    for (BaseFileItem fi : e.getValue()) {
                        sendFileToWb(fi, false);
                    }
                }
            } finally {
                wbm.unlock(roomId);
            }
        }
    }
    Whiteboards wbs = wbm.get(roomId, langId);
    loadWhiteboards(sb, rp.getClient(), wbs, wbm.list(roomId));
    JSONObject wbj = getWbJson(wbs.getActiveWb());
    sb.append("WbArea.activateWb(").append(wbj).append(");");
    Whiteboard wb = wbs.get(wbs.getActiveWb());
    if (wb != null) {
        sb.append("WbArea.setSlide(").append(wbj.put(ATTR_SLIDE, wb.getSlide())).append(");");
    }
    sb.append("WbArea.loadVideos();");
}

From source file:gda.scan.ConcurrentScanChild.java

/**
 * Merges the detectors into the scannables-by-level map: each detector is
 * appended to the array registered at its level, creating the entry when the
 * level is new. The input map itself is not modified.
 *
 * @param scannableLevels scannables grouped by level
 * @param detectors detectors to merge in at their own level
 * @return a new map containing both scannables and detectors, grouped by level
 */
TreeMap<Integer, Scannable[]> generateDevicesToMoveByLevel(TreeMap<Integer, Scannable[]> scannableLevels,
        Vector<Detector> detectors) {

    TreeMap<Integer, Scannable[]> devicesToMoveByLevel = new TreeMap<Integer, Scannable[]>();
    devicesToMoveByLevel.putAll(scannableLevels);

    for (Scannable detector : detectors) {

        Integer level = detector.getLevel();

        // Single get() instead of the original containsKey()+get() double lookup.
        Scannable[] levelArray = devicesToMoveByLevel.get(level);
        if (levelArray == null) {
            levelArray = new Scannable[] { detector };
        } else {
            levelArray = (Scannable[]) ArrayUtils.add(levelArray, detector);
        }
        devicesToMoveByLevel.put(level, levelArray);
    }
    return devicesToMoveByLevel;
}

From source file:com.sfs.whichdoctor.importer.Importer.java

/**
 * Sets the column map./* w  w w  . java  2s .com*/
 *
 * @param type the type
 * @param data the data
 * @param includeRowsVal the include rows
 */
public final void setColumnMap(final String type, final TreeMap<Integer, TreeMap<Integer, String>> data,
        final TreeMap<Integer, String> includeRowsVal) {

    TreeMap<Integer, String> columnMapVal = new TreeMap<Integer, String>();

    List<String> fields = new ArrayList<String>();

    if (StringUtils.equalsIgnoreCase(type, "exam")) {
        ExamImporter examImporter = new ExamImporter();
        fields = examImporter.getFields();
    }

    // Inspect the first row of data supplied
    Integer rowIndex = data.keySet().iterator().next();

    TreeMap<Integer, String> firstRow = data.get(rowIndex);

    int fieldMatches = 0;

    for (Integer columnNumber : firstRow.keySet()) {
        String dataField = firstRow.get(columnNumber);
        String fieldName = "";

        // Iterate through each field to see if there is a match
        // If there is more than two matches then the first row
        // is indicating column field names
        for (int i = 0; i < fields.size(); i++) {
            String field = (String) fields.get(i);
            if (StringUtils.equalsIgnoreCase(dataField, field)) {
                // Matching field
                fieldName = dataField;
                fieldMatches++;
            }
        }
        columnMapVal.put(columnNumber, fieldName);
    }
    if (fieldMatches > 2) {
        // There were more than two field matches
        // Deselect the first column from the list of imports
        if (includeRowsVal.containsKey(rowIndex)) {
            includeRowsVal.remove(rowIndex);
        }
    }

    setIncludeRows(includeRowsVal);
    setColumnMap(columnMapVal);

}

From source file:checkdb.CheckDb.java

/**
 * Build a map of all channels that are derived from each base name for a single IFO:Subsystem.
 *
 * Streams every channel whose name starts with the given prefix and
 * accumulates its statistics into {@code chanstats}, keyed by base name and
 * then by (short) server name.
 *
 * @param ifoSubsys String of the form IFO:Subsystem[-_]
 */
private void buildChanStats(String ifoSubsys) {
    try {
        chanstats.clear();

        chnTbl.streamByName(ifoSubsys + "%");
        ChanInfo ci;
        count = 0;
        while ((ci = chnTbl.streamNext()) != null) {
            count++;
            // Lightweight progress indicator for very large channel sets.
            if (verbose > 2 && count > 0 && count % 100000 == 0) {
                System.out.format("\033[2K %,8d\r", count);
                System.out.flush();
            }
            String basename = ci.getBaseName();
            String serv = ci.getServer().replace(".caltech.edu", "");

            // Fixes from the original: the unused 'name' local is gone, and the
            // redundant re-put of chanstatLst at the end of each iteration is
            // removed (the map is registered once, when first created).
            TreeMap<String, ChanStat> chanstatLst = chanstats.get(basename);
            if (chanstatLst == null) {
                chanstatLst = new TreeMap<>();
                chanstats.put(basename, chanstatLst);
            }
            ChanStat chanstat = chanstatLst.get(serv);
            if (chanstat == null) {
                chanstat = new ChanStat();
                chanstatLst.put(serv, chanstat);
            }
            chanstat.add(ci);
        }

    } catch (SQLException ex) {
        Logger.getLogger(CheckDb.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        // The finally block always closes the stream, which makes the extra
        // close the original performed inside the catch redundant (it caused
        // a double close on the error path).
        try {
            chnTbl.streamClose();
        } catch (SQLException ex) {
            Logger.getLogger(CheckDb.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
}