Example usage for java.util HashSet iterator

Introduction

This page collects example usages of java.util.HashSet.iterator() drawn from open source projects.

Prototype

public Iterator<E> iterator() 

Document

Returns an iterator over the elements in this set.
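
As a quick orientation before the project examples, here is a minimal, self-contained sketch of typical usage; the set contents and the filter condition are illustrative only. It walks a HashSet with its iterator and removes matching elements through Iterator.remove(), the only safe way to remove elements mid-iteration.

import java.util.HashSet;
import java.util.Iterator;

public class HashSetIteratorExample {
    public static void main(String[] args) {
        HashSet<String> colors = new HashSet<>();
        colors.add("red");
        colors.add("green");
        colors.add("blue");

        // Returns an iterator over the elements in this set.
        Iterator<String> it = colors.iterator();
        while (it.hasNext()) {
            String color = it.next();
            // Remove through the iterator; removing through the set while
            // iterating would make the iterator fail fast with a
            // ConcurrentModificationException.
            if (color.startsWith("g")) {
                it.remove();
            }
        }
        System.out.println(colors); // prints red and blue in unspecified order
    }
}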

Usage

From source file:edu.ucla.cs.scai.canali.core.index.BuildIndex.java

private HashSet<Integer> findCommonLowestAncestor(Set<Integer> classes) {
    HashSet<Integer> finalClasses = new HashSet<>(classes);
    while (finalClasses.size() > 1) {
        Iterator<Integer> it = finalClasses.iterator();
        int c1 = it.next();
        it.remove();
        int c2 = it.next();
        it.remove();
        HashSet<Integer> cas = findCommonLowestAncestor(c1, c2);
        for (int ca : cas) {
            finalClasses.add(ca);
        }
    }
    return finalClasses;
}
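
Note the remove-while-iterating pattern above: elements are drawn with next() and deleted with Iterator.remove(). Calling finalClasses.remove(...) directly inside the loop would instead make the fail-fast iterator throw a ConcurrentModificationException on a subsequent call to next().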

From source file:org.lexevs.system.ResourceManager.java

/**
 * Initializes the resource manager.
 * 
 * @throws Exception the exception
 */
public void init() throws Exception {
    cache_ = Collections.synchronizedMap(new LRUMap(systemVars_.getCacheSize()));

    // This increases the ability of Lucene to do queries against
    // large indexes like the MetaThesaurus without getting errors.
    BooleanQuery.setMaxClauseCount(systemVars_.getLuceneMaxClauseCount());

    codingSchemeToServerMap_ = new Hashtable<String, String>();
    sqlServerInterfaces_ = new Hashtable<String, SQLInterface>();
    historySqlServerInterfaces_ = new Hashtable<String, SQLHistoryInterface>();
    codingSchemeLocalNamesToInternalNameMap_ = new Hashtable<String, Hashtable<String, String>>();
    internalCodingSchemeNameUIDMap_ = new Hashtable<String, List<LocalCodingScheme>>();
    supportedCodingSchemeToInternalMap_ = new Hashtable<String, String>();

    // populate the registry
    //registry_ = new XmlRegistry(systemVars_.getAutoLoadRegistryPath());

    // connect to the histories
    readHistories();

    // go through all of the sql servers and read all of the available code
    // systems.
    // initialize the SQL connections to each server.

    org.lexevs.registry.service.XmlRegistry.DBEntry[] entries = registry_.getDBEntries();
    for (int i = 0; i < entries.length; i++) {
        SQLConnectionInfo temp = new SQLConnectionInfo();
        temp.driver = systemVars_.getAutoLoadDBDriver();
        temp.password = systemVars_.getAutoLoadDBPassword();
        temp.server = entries[i].dbURL;
        temp.prefix = entries[i].prefix;
        temp.username = systemVars_.getAutoLoadDBUsername();
        readTerminologiesFromServer(temp);
    }

    logger_.debug("Reading available terminologies from SQL servers.");

    // same thing as above, this time for pre-configured servers
    Hashtable<String, SQLConnectionInfo> servers = systemVars_.getSqlServers();

    Enumeration<SQLConnectionInfo> e = servers.elements();
    while (e.hasMoreElements()) {
        SQLConnectionInfo server = e.nextElement();
        readTerminologiesFromServer(server);
    }

    logger_.debug("Reading available terminologies from the lucene index locations");

    // go through all of the index locations, finding the right index for
    // each code system.
    // initialize the index readers.
    HashSet<String> indexLocations = systemVars_.getIndexLocations();
    Iterator<String> iterator = indexLocations.iterator();

    indexInterfaces_ = new Hashtable<String, IndexInterface>();
    codingSchemeToIndexMap_ = new Hashtable<String, String>();

    while (iterator.hasNext()) {
        String location = iterator.next();

        File temp = new File(location);
        if (!temp.exists() || !temp.isDirectory()) {
            logger_.error("Bad index location " + location);
        } else {

            IndexInterface is = new IndexInterface(location);
            indexInterfaces_.put(location, is);

            ArrayList<String> keys = is.getCodeSystemKeys();
            for (int i = 0; i < keys.size(); i++) {
                codingSchemeToIndexMap_.put(keys.get(i), location);
            }
        }
    }

    // Start up a thread to handle scheduled deactivations
    fdt_ = new FutureDeactivatorThread();
    deactivatorThread_ = new Thread(fdt_);
    // This allows the JVM to exit while this thread is still active.
    deactivatorThread_.setDaemon(true);
    deactivatorThread_.start();
}

From source file:uk.ac.cam.caret.sakai.rwiki.tool.service.impl.PopulateServiceImpl.java

private void updateReferences(RWikiCurrentObject rwo, String space) {

    // render to get a list of links
    final HashSet referenced = new HashSet();
    final String currentRealm = rwo.getRealm();

    PageLinkRenderer plr = new PageLinkRenderer() {
        public void appendLink(StringBuffer buffer, String name, String view) {
            referenced.add(NameHelper.globaliseName(name, currentRealm));
        }

        public void appendLink(StringBuffer buffer, String name, String view, String anchor) {
            referenced.add(NameHelper.globaliseName(name, currentRealm));
        }

        public void appendCreateLink(StringBuffer buffer, String name, String view) {
            referenced.add(NameHelper.globaliseName(name, currentRealm));
        }

        public void appendLink(StringBuffer buffer, String name, String view, String anchor,
                boolean autoGenerated) {
            if (!autoGenerated) {
                this.appendLink(buffer, name, view, anchor);
            }
        }

        public boolean isCachable() {
            return false; // should not cache this render op
        }

        public boolean canUseCache() {
            return false;
        }

        public void setCachable(boolean cachable) {
            // do nothing
        }

        public void setUseCache(boolean b) {
            // do nothing
        }

    };

    renderService.renderPage(rwo, space, plr);

    // process the references
    StringBuffer sb = new StringBuffer();
    Iterator i = referenced.iterator();
    while (i.hasNext()) {
        sb.append("::").append(i.next());
    }
    sb.append("::");
    rwo.setReferenced(sb.toString());

}

From source file:net.sourceforge.mipa.predicatedetection.lattice.sequence.SequenceWindowedLatticeChecker.java

private void computeReachableStates(SequenceLatticeIDNode CGS) {
    labelingCGS(CGS);
    if (CGS.equals(minCGS)) {
        CGS.getReachedStates().clear();
        String[] string = CGS.getSatisfiedPredicates().split(" ");
        for (int i = 0; i < string.length; i++) {
            State state = automaton.getInitialState().step(string[i].charAt(0));
            CGS.addReachedStates(state);
        }
        if (DEBUG) {
            long time_t = (new Date()).getTime();
            out.print("[ ");
            for (int i = 0; i < CGS.getID().length; i++) {
                out.print(CGS.getID()[i] + " ");
            }
            out.print("]: satisfied predicates: " + CGS.getSatisfiedPredicates());
            out.print(" reachable states: ");
            Iterator<State> it = CGS.getReachedStates().iterator();
            while (it.hasNext()) {
                State state = it.next();
                out.print(state.getName() + " ");
            }
            out.println();
            out.flush();
            wastedTime += (new Date()).getTime() - time_t;
        }
    } else {
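        // Union the reachable states of all immediate predecessor nodes in the lattice.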
        HashSet<State> precState = new HashSet<State>();
        for (int i = 0; i < children.length; i++) {
            String[] index = new String[children.length];
            for (int j = 0; j < children.length; j++) {
                index[j] = CGS.getID()[j];
            }
            index[i] = Integer.toString(Integer.valueOf(index[i]) - 1);
            String ID = StringUtils.join(index, ' ');
            if (getMappedLattice().get(ID) != null) {
                SequenceLatticeIDNode node = (SequenceLatticeIDNode) getMappedLattice().get(ID);
                if (node.getReachedStates().size() == 0) {
                    computeReachableStates(node);
                }
                Iterator<State> iterator = node.getReachedStates().iterator();
                while (iterator.hasNext()) {
                    precState.add(iterator.next());
                }
            }
        }
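        // Step the automaton from each predecessor state over this node's satisfied predicates.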
        Iterator<State> iterator = precState.iterator();
        while (iterator.hasNext()) {
            State state = iterator.next();
            String[] satisfiedPredicate = CGS.getSatisfiedPredicates().split(" ");
            for (int i = 0; i < satisfiedPredicate.length; i++) {
                if (!satisfiedPredicate[i].equals("")) {
                    char c = satisfiedPredicate[i].charAt(0);
                    State step = state.step(c);
                    CGS.addReachedStates(step);
                }
            }
        }

        if (DEBUG) {
            long time_t = (new Date()).getTime();
            out.print("[ ");
            for (int i = 0; i < CGS.getID().length; i++) {
                out.print(CGS.getID()[i] + " ");
            }
            out.print("]: satisfied predicates: " + CGS.getSatisfiedPredicates());
            out.print(" reachable states: ");
            Iterator<State> it = CGS.getReachedStates().iterator();
            while (it.hasNext()) {
                State state = it.next();
                out.print(state.getName() + " ");
            }
            out.println();
            out.flush();
            wastedTime += (new Date()).getTime() - time_t;
        }
    }
}

From source file:org.gbif.ipt.task.Eml2Rtf.java

/**
 * Add authors section.
 * 
 * @param doc Document
 * @param eml EML
 * @throws DocumentException if problem occurs during add
 */
private void addAuthors(Document doc, Eml eml) throws DocumentException {
    // Creating set of authors with different names. (first names + last names).
    HashSet<Agent> tempAgents = new LinkedHashSet<Agent>();
    if (exists(eml.getResourceCreator()) && exists(eml.getResourceCreator().getLastName())) {
        tempAgents.add(eml.getResourceCreator());
    }
    if (exists(eml.getMetadataProvider()) && exists(eml.getMetadataProvider().getLastName())) {
        tempAgents.add(eml.getMetadataProvider());
    }
    tempAgents.addAll(eml.getAssociatedParties());

    // comparing and removing those repeated agents with same name and same address.
    Collection<Integer> toRemove = new ArrayList<Integer>();
    int counter = 0;
    for (Iterator<Agent> i = tempAgents.iterator(); i.hasNext(); counter++) {
        if (toRemove.contains(counter)) {
            i.next();
            i.remove();
        } else {
            Agent agentA = i.next();
            // marks where the second iterator should start
            boolean flag = false;
            int countTemp = 0;
            for (Iterator<Agent> j = tempAgents.iterator(); j.hasNext(); countTemp++) {
                Agent agentB = j.next();
                if (flag) {
                    if (equal(agentA.getLastName(), agentB.getLastName())
                            && equal(agentA.getFirstName(), agentB.getFirstName())
                            && equal(agentA.getAddress(), agentB.getAddress())) {
                        toRemove.add(countTemp);
                    }
                } else if (agentA.equals(agentB)) {
                    flag = true;
                }
            }
        }
    }

    Agent[] agentsArray = new Agent[tempAgents.size()];
    tempAgents.toArray(agentsArray);
    // Adding authors
    Paragraph p = new Paragraph();
    p.setFont(font);
    p.setAlignment(Element.ALIGN_CENTER);
    java.util.List<Agent> affiliations = new ArrayList<Agent>();
    int superScriptCounter = 1;
    for (int c = 0; c < agentsArray.length; c++) {
        if (exists(agentsArray[c].getLastName())) {
            if (c != 0) {
                p.add(", ");
            }
            // First Name and Last Name
            if (exists(agentsArray[c].getFirstName())) {
                p.add(agentsArray[c].getFirstName() + " ");
            }
            p.add(agentsArray[c].getLastName());
            // Looking for addresses and organisations of other authors
            // (superscripts should not be repeated).
            boolean isRepeated = false;
            // look into the affiliations array to find any previous repeated agent info.
            for (int index = 0; index < affiliations.size(); index++) {
                if (equal(agentsArray[c].getAddress(), affiliations.get(index).getAddress())
                        && equal(agentsArray[c].getOrganisation(), affiliations.get(index).getOrganisation())) {
                    p.add(createSuperScript(String.valueOf(index + 1)));
                    isRepeated = true;
                    break;
                }
            }
            // if the agent is not repeated.
            if (!isRepeated) {
                p.add(createSuperScript(String.valueOf(superScriptCounter)));
                affiliations.add(agentsArray[c]);
                superScriptCounter++;
            }
        }
    }
    doc.add(p);
    p.clear();
    doc.add(Chunk.NEWLINE);
    tempAgents.clear();
    // <AFFILIATIONS>
    p = new Paragraph();
    p.setFont(font);
    p.setAlignment(Element.ALIGN_JUSTIFIED);
    for (int c = 0; c < affiliations.size(); c++) {
        if (c != 0) {
            p.add("; ");
        }
        p.add((c + 1) + " ");
        if (exists(affiliations.get(c).getOrganisation())) {
            p.add(affiliations.get(c).getOrganisation() + ", ");
        }
        if (exists(affiliations.get(c).getAddress().getAddress())) {
            p.add(affiliations.get(c).getAddress().getAddress() + ", ");
        }
        if (exists(affiliations.get(c).getAddress().getPostalCode())) {
            p.add(affiliations.get(c).getAddress().getPostalCode() + ", ");
        }
        if (exists(affiliations.get(c).getAddress().getCity())) {
            p.add(affiliations.get(c).getAddress().getCity());
        }
        if (exists(affiliations.get(c).getAddress().getCountry())) {
            VocabularyConcept concept = vocabManager.get(Constants.VOCAB_URI_COUNTRY)
                    .findConcept(affiliations.get(c).getAddress().getCountry());
            // write country in default language as matched from vocabulary or original value
            if (exists(concept)) {
                p.add(", " + WordUtils.capitalizeFully(concept.getPreferredTerm(DEFAULT_LANGUAGE).getTitle()));
            } else {
                p.add(", " + WordUtils.capitalizeFully(affiliations.get(c).getAddress().getCountry()));
            }
        }
    }
    doc.add(p);
    p.clear();
    doc.add(Chunk.NEWLINE);
    // <Corresponding Authors>
    p = new Paragraph();
    p.setAlignment(Element.ALIGN_JUSTIFIED);
    p.add(new Phrase(getText("rtf.authors") + ": ", fontTitle));
    p.setFont(font);
    boolean isFirst = true;
    if (exists(eml.getResourceCreator())) {
        if (exists(eml.getResourceCreator().getFirstName())) {
            p.add(eml.getResourceCreator().getFirstName() + " ");
        }
        p.add(eml.getResourceCreator().getLastName());
        if (exists(eml.getResourceCreator().getEmail())) {
            p.add(" (" + eml.getResourceCreator().getEmail() + ")");
        }
        isFirst = false;
    }
    if (exists(eml.getMetadataProvider())) {
        boolean sameAsCreator = false;
        if (!isFirst) {
            sameAsCreator = equal(eml.getMetadataProvider().getAddress(), eml.getResourceCreator().getAddress())
                    && equal(eml.getMetadataProvider().getEmail(), eml.getResourceCreator().getEmail());
        }
        if (!sameAsCreator) {
            p.add(", ");
            if (exists(eml.getMetadataProvider().getFirstName())) {
                p.add(eml.getMetadataProvider().getFirstName() + " ");
            }
            p.add(eml.getMetadataProvider().getLastName());
            if (exists(eml.getMetadataProvider().getEmail())) {
                p.add(" (" + eml.getMetadataProvider().getEmail() + ")");
            }
        }
    }
    p.add(Chunk.NEWLINE);
    doc.add(p);
    p.clear();
}

From source file:org.intermine.bio.dataconversion.XenmineConverter.java

/**
 * Processes the zebrafish ortholog file and links Xenbase genes to their zebrafish orthologs.
 * @param preader reader over the tab-delimited ortholog file
 * @throws Exception
 * @throws ObjectStoreException
 */
private void processZebrafishOrthologFile(Reader preader) throws Exception, ObjectStoreException {

    /* entrez/NCBI gene ID     
     * Xenbase GenePage     
     * symbol
     * name
     */
    System.out.println("Processing ZebraFish Ortholog file....");

    Iterator<?> tsvIter;
    try {
        tsvIter = FormattedTextParser.parseTabDelimitedReader(preader);
    } catch (Exception e) {
        throw new Exception("cannot parse file: " + preader.toString(), e);
    }

    while (tsvIter.hasNext()) {

        String[] line = (String[]) tsvIter.next();

        if (line.length < 4) {
            LOG.error("Couldn't process line. Expected 4 cols, but was " + line.length);
            continue;
        }

        String humanIdentifier = line[0].trim();
        String xenopusIdentifier = line[1].trim().substring(12);

        if (StringUtils.isEmpty(xenopusIdentifier) || StringUtils.isEmpty(humanIdentifier)) {
            continue;
        }

        String gene2 = getGene(humanIdentifier, "7955");

        HashSet geneIds = genesPageName.get(xenopusIdentifier);
        if (geneIds == null) { // no genes were loaded for this gene page
            continue;
        }
        Iterator it = geneIds.iterator();

        while (it.hasNext()) {

            String geneId = (String) it.next();
            Item gene1 = genes.get(geneId);
            if (gene1 != null && gene2 != null) { // both genes must already exist before a homologue can be created
                processHomologues(gene1.getIdentifier(), gene2);
            }

        }

    }

    preader.close();

}

From source file:org.intermine.bio.dataconversion.XenmineConverter.java

/**
 * Processes the human ortholog file and links Xenbase genes to their human orthologs.
 * @param preader reader over the tab-delimited ortholog file
 * @throws Exception
 * @throws ObjectStoreException
 */
private void processHumanOrthologFile(Reader preader) throws Exception, ObjectStoreException {

    /* entrez/NCBI gene ID     
     * Xenbase GenePage     
     * symbol
     * name
     */
    System.out.println("Processing Human Ortholog file....");

    Iterator<?> tsvIter;
    try {
        tsvIter = FormattedTextParser.parseTabDelimitedReader(preader);
    } catch (Exception e) {
        throw new Exception("cannot parse file: " + preader.toString(), e);
    }

    while (tsvIter.hasNext()) {

        String[] line = (String[]) tsvIter.next();

        if (line.length < 4) {
            LOG.error("Couldn't process line. Expected 4 cols, but was " + line.length);
            continue;
        }

        String humanIdentifier = line[0].trim();
        String xenopusIdentifier = line[1].trim().substring(12);

        if (StringUtils.isEmpty(xenopusIdentifier) || StringUtils.isEmpty(humanIdentifier)) {
            continue;
        }

        String gene2 = getGene(humanIdentifier, "9606");

        HashSet geneIds = genesPageName.get(xenopusIdentifier);
        if (geneIds == null) { // no genes were loaded for this gene page
            continue;
        }
        Iterator it = geneIds.iterator();

        while (it.hasNext()) {

            String geneId = (String) it.next();
            Item gene1 = genes.get(geneId);
            if (gene1 != null && gene2 != null) { // both genes must already exist before a homologue can be created
                processHomologues(gene1.getIdentifier(), gene2);
            }

        }

    }

    preader.close();

}

From source file:org.intermine.bio.dataconversion.XenmineConverter.java

/**
 * Processes the mouse ortholog file and links Xenbase genes to their mouse orthologs.
 * @param preader reader over the tab-delimited ortholog file
 * @throws Exception
 * @throws ObjectStoreException
 */

private void processMouseOrthologFile(Reader preader) throws Exception, ObjectStoreException {

    /* entrez/NCBI gene ID     
     * Xenbase GenePage     
     * symbol
     * name
     */

    System.out.println("Processing Mouse Ortholog file....");

    Iterator<?> tsvIter;
    try {
        tsvIter = FormattedTextParser.parseTabDelimitedReader(preader);
    } catch (Exception e) {
        throw new Exception("cannot parse file: " + preader.toString(), e);
    }

    while (tsvIter.hasNext()) {

        String[] line = (String[]) tsvIter.next();

        if (line.length < 4) {
            LOG.error("Couldn't process line. Expected 4 cols, but was " + line.length);
            continue;
        }

        String mouseIdentifier = line[0].trim();
        String xenopusIdentifier = line[1].trim().substring(12);

        if (StringUtils.isEmpty(xenopusIdentifier) || StringUtils.isEmpty(mouseIdentifier)) {
            continue;
        }

        String gene2 = getGene(mouseIdentifier, "10090");

        HashSet geneIds = genesPageName.get(xenopusIdentifier);
        if (geneIds == null) { // no genes were loaded for this gene page
            continue;
        }
        Iterator it = geneIds.iterator();

        while (it.hasNext()) {

            String geneId = (String) it.next();
            Item gene1 = genes.get(geneId);
            if (gene1 != null && gene2 != null) { // both genes must already exist before a homologue can be created
                processHomologues(gene1.getIdentifier(), gene2);
            }

        }

    }

    preader.close();

}

From source file:com.sun.faban.harness.webclient.ResultAction.java

/**
 * Uploads the given runs to the configured repositories.
 * @param runIds the ids of the runs to upload
 * @param uploadSet the run archive files to upload
 * @param replaceSet the ids of runs to replace on the repository
 * @return the set of duplicate run ids reported by the repository
 * @throws java.io.IOException if communicating with a repository fails
 */
public static HashSet<String> uploadRuns(String[] runIds, HashSet<File> uploadSet, HashSet<String> replaceSet)
        throws IOException {
    // 3. Upload the run
    HashSet<String> duplicates = new HashSet<String>();

    // Prepare run id set for cross checking.
    HashSet<String> runIdSet = new HashSet<String>(runIds.length);
    for (String runId : runIds) {
        runIdSet.add(runId);
    }

    // Prepare the parts for the request.
    ArrayList<Part> params = new ArrayList<Part>();
    params.add(new StringPart("host", Config.FABAN_HOST));
    for (String replaceId : replaceSet) {
        params.add(new StringPart("replace", replaceId));
    }
    for (File jarFile : uploadSet) {
        params.add(new FilePart("jarfile", jarFile));
    }
    Part[] parts = new Part[params.size()];
    parts = params.toArray(parts);

    // Send the request to each repository.
    for (URL repository : Config.repositoryURLs) {
        URL repos = new URL(repository, "/controller/uploader/upload_runs");
        PostMethod post = new PostMethod(repos.toString());
        post.setRequestEntity(new MultipartRequestEntity(parts, post.getParams()));

        HttpClient client = new HttpClient();
        client.getHttpConnectionManager().getParams().setConnectionTimeout(5000);
        int status = client.executeMethod(post);

        if (status == HttpStatus.SC_FORBIDDEN)
            logger.warning("Server denied permission to upload run !");
        else if (status == HttpStatus.SC_NOT_ACCEPTABLE)
            logger.warning("Run origin error!");
        else if (status != HttpStatus.SC_CREATED)
            logger.warning(
                    "Server responded with status code " + status + ". Status code 201 (SC_CREATED) expected.");
        for (File jarFile : uploadSet) {
            jarFile.delete();
        }

        String response = post.getResponseBodyAsString();

        if (status == HttpStatus.SC_CREATED) {

            StringTokenizer t = new StringTokenizer(response.trim(), "\n");
            while (t.hasMoreTokens()) {
                String duplicateRun = t.nextToken().trim();
                if (duplicateRun.length() > 0)
                    duplicates.add(duplicateRun.trim());
            }

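            // Cross-check the reported duplicates against the runs we actually uploaded.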
            for (Iterator<String> iter = duplicates.iterator(); iter.hasNext();) {
                String runId = iter.next();
                if (!runIdSet.contains(runId)) {
                    logger.warning("Unexpected archive response from " + repos + ": " + runId);
                    iter.remove();
                }
            }
        } else {
            logger.warning("Message from repository: " + response);
        }
    }
    return duplicates;
}

From source file:org.intermine.bio.dataconversion.XenmineConverter.java

/**
 * Processes the synonym file, storing gene descriptions and synonyms.
 * @param preader reader over the tab-delimited synonym file
 * @throws Exception
 * @throws ObjectStoreException
 */

private void processSynFile(Reader preader) throws Exception, ObjectStoreException {

    /* Xenbase gene ID     
     * gene symbol    
     *  gene name     
     *  gene function -- will store as description    
     *  gene synonyms    
     *  JGI ID  -- missing from the file --not required
     */

    System.out.println("Processing Synonym file....");

    Iterator<?> tsvIter;
    try {
        tsvIter = FormattedTextParser.parseTabDelimitedReader(preader);
    } catch (Exception e) {
        throw new Exception("cannot parse file: " + preader.toString(), e);
    }

    while (tsvIter.hasNext()) {

        String[] line = (String[]) tsvIter.next();

        if (line.length < 5) {
            LOG.error("Couldn't process line. Expected 5 cols, but was " + line.length);
            continue;
        }

        String genePageId = line[0].trim().substring(12);
        String symbol = line[1].trim();
        String name = line[2].trim();
        String desc = line[3].trim();
        String synonyms = line[4].trim();

        HashSet geneIds = genesPageName.get(genePageId);
        if (geneIds == null) { // no genes were loaded for this gene page
            continue;
        }
        Iterator it = geneIds.iterator();

        while (it.hasNext()) {

            String geneId = (String) it.next();
            Item gene = genes.get(geneId);

            if (gene != null) {

                if (desc != null && !StringUtils.isEmpty(desc)) {
                    gene.setAttribute("briefDescription", desc);
                }

                if (synonyms != null && !StringUtils.isEmpty(synonyms)) {

                    gene.setAttribute("alias", synonyms);

                    if (synonyms.indexOf("|") != -1) {
                        String[] syns = synonyms.split("\\|");
                        for (int i = 0; i < syns.length; i++) {
                            getSynonym(gene.getIdentifier(), syns[i]);
                        }
                    } else {
                        getSynonym(gene.getIdentifier(), synonyms);
                    }

                }

            } else {
                System.out
                        .println("gene page id for a gene that is not laoded in the prev file.." + genePageId);
            }

        }

    }

    preader.close();

}