Example usage for java.util Set removeAll

List of usage examples for java.util Set removeAll

Introduction

On this page you can find example usage for java.util.Set.removeAll.

Prototype

boolean removeAll(Collection<?> c);

Document

Removes from this set all of its elements that are contained in the specified collection (optional operation).
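Before the project examples below, here is a minimal, self-contained sketch of a typical call (the class name, set contents, and argument values are illustrative only and are not taken from any of the listed projects). removeAll returns true if the set changed as a result of the call, and because it is an optional operation, unmodifiable sets may throw UnsupportedOperationException instead.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class RemoveAllExample {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>(Arrays.asList("red", "green", "blue", "yellow"));
        // Remove every element that also appears in the argument collection.
        boolean changed = colors.removeAll(Arrays.asList("green", "yellow", "purple"));
        System.out.println(changed); // true - at least one element was removed
        System.out.println(colors);  // e.g. [red, blue] (HashSet iteration order is unspecified)
        // Note: removeAll is an optional operation; an unmodifiable set such as
        // Set.of("red", "blue") would throw UnsupportedOperationException here.
    }
}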

Usage

From source file:org.openmrs.module.patientaccesscontrol.api.impl.PatientAccessControlServiceImpl.java

private PatientAccess getPatientAccess() {
    Set<Integer> includePatients = new HashSet<Integer>();
    Set<Integer> excludePatients = new HashSet<Integer>();
    Set<Integer> explicitlyIncludedPatients = new HashSet<Integer>();
    boolean hasInclude = false;
    boolean checkAllAccessControls = checkAllAccessControls();
    for (Class<? extends AccessControlService> service : accessControlServices) {
        AccessControlService svc = Context.getService(service);
        List<Integer> ip = svc.getIncludedPatients();
        List<Integer> ep = svc.getExcludedPatients();
        if (ip != null) {
            hasInclude = true;
            includePatients.addAll(ip);
        }
        excludePatients.addAll(ep);
        if (!checkAllAccessControls()) {
            explicitlyIncludedPatients.addAll(svc.getExplicitlyIncludedPatients());
        }
    }
    List<Integer> mustInclude = Context.getService(UserPatientService.class).getIncludedPatients();
    if (!hasInclude) {
        includePatients = null;
        if (!checkAllAccessControls) {
            excludePatients.removeAll(explicitlyIncludedPatients);
        }
    } else {
        if (!checkAllAccessControls) {
            includePatients.addAll(explicitlyIncludedPatients);
            excludePatients.removeAll(includePatients);
        }
        includePatients.addAll(mustInclude);
    }
    excludePatients.removeAll(mustInclude);
    return new PatientAccess(includePatients, excludePatients);
}

From source file:de.uni_potsdam.hpi.asg.logictool.mapping.SequenceBasedAndGateDecomposer.java

private void removeCandidates(SortedSet<IOBehaviour> sequencesFront, SortedSet<IOBehaviour> sequencesBack,
        Set<IOBehaviour> newSequences, Set<IOBehaviour> rmSequences) {
    removeSubSequences(sequencesFront, sequencesBack, newSequences, rmSequences); //new->front,back ; set rm
    sequencesBack.removeAll(rmSequences);
    sequencesFront.removeAll(rmSequences);
    newSequences.removeAll(rmSequences);
    if (rmSequences.size() > 0) {
        rmSub += rmSequences.size();
        logger.debug("rmSub removed " + rmSequences.size() + " candidates");
    }
    //      checkFalling(newSequences, rmSequences, term, relevant, partitions); //set rm
    //      sequencesBack.removeAll(rmSequences);
    //      sequencesFront.removeAll(rmSequences);
    newSequences.clear();
    //      if(rmSequences.size() > 0) {
    //         rmFall += rmSequences.size();
    //         logger.debug("chkFall removed " + rmSequences.size() + " candidates");
    //      }
}

From source file:com.aurel.track.fieldType.fieldChange.apply.CustomMultipleSelectFieldChangeApply.java

/**
 * Sets the workItemBean's attribute
 * @param workItemContext
 * @param workItemBean
 * @param parameterCode
 * @param value   
 * @return ErrorData if an error is found
 */
@Override
public List<ErrorData> setWorkItemAttribute(WorkItemContext workItemContext, TWorkItemBean workItemBean,
        Integer parameterCode, Object value) {
    if (getSetter() == FieldChangeSetters.SET_NULL || getSetter() == FieldChangeSetters.SET_REQUIRED) {
        return super.setWorkItemAttribute(workItemContext, workItemBean, parameterCode, value);
    }
    Object originalValue = workItemBean.getAttribute(activityType, parameterCode);
    Object[] originalSelections = null;
    if (originalValue != null) {
        try {
            //multiple values are loaded in the workItem as Object[], not as Integer[] !!! 
            originalSelections = (Object[]) originalValue;
        } catch (Exception e) {
            LOGGER.info(
                    "Getting the original object array value for " + value + " failed with " + e.getMessage());
            LOGGER.debug(ExceptionUtils.getStackTrace(e));
        }
    }
    Set<Integer> originalSet = new HashSet<Integer>();
    if (originalSelections != null && originalSelections.length > 0) {
        for (int i = 0; i < originalSelections.length; i++) {
            try {
                originalSet.add((Integer) originalSelections[i]);
            } catch (Exception e) {
                LOGGER.info("Transforming the original object value " + originalSelections[i]
                        + " to Integer failed with " + e.getMessage());
                LOGGER.debug(ExceptionUtils.getStackTrace(e));
            }
        }
    }
    Integer[] newValue = (Integer[]) value;
    Set<Integer> bulkSelectionsSet = GeneralUtils.createSetFromIntegerArr(newValue);
    switch (getSetter()) {
    case FieldChangeSetters.SET_TO:
        workItemBean.setAttribute(activityType, parameterCode, newValue);
        break;
    case FieldChangeSetters.ADD_ITEMS:
        originalSet.addAll(bulkSelectionsSet);
        workItemBean.setAttribute(activityType, parameterCode,
                GeneralUtils.createIntegerArrFromCollection(originalSet));
        break;
    case FieldChangeSetters.REMOVE_ITEMS:
        originalSet.removeAll(bulkSelectionsSet);
        workItemBean.setAttribute(activityType, parameterCode,
                GeneralUtils.createIntegerArrFromCollection(originalSet));
        break;
    default:
        break;
    }
    return null;
}

From source file:org.apache.solr.cloud.FullSolrCloudTest.java

void showDiff(SolrDocumentList a, SolrDocumentList b, String aName, String bName) {
    System.err.println("######" + aName + ": " + a);
    System.err.println("######" + bName + ": " + b);
    System.err.println("###### sizes=" + a.size() + "," + b.size());

    Set<Map> setA = new HashSet<Map>();
    for (SolrDocument sdoc : a) {
        setA.add(new HashMap(sdoc));
    }

    Set<Map> setB = new HashSet<Map>();
    for (SolrDocument sdoc : b) {
        setB.add(new HashMap(sdoc));
    }

    Set<Map> onlyInA = new HashSet<Map>(setA);
    onlyInA.removeAll(setB);
    Set<Map> onlyInB = new HashSet<Map>(setB);
    onlyInB.removeAll(setA);

    if (onlyInA.size() > 0) {
        System.err.println("###### Only in " + aName + ": " + onlyInA);
    }
    if (onlyInB.size() > 0) {
        System.err.println("###### Only in " + bName + ": " + onlyInB);
    }
}

From source file:au.org.ala.delta.key.Key.java

private Set<Character> getSpecimenAvailableCharacters(Specimen specimen, List<Character> includedCharacters) {
    Set<Character> availableChars = new HashSet<Character>(includedCharacters);

    availableChars.removeAll(specimen.getUsedCharacters());

    availableChars.removeAll(specimen.getInapplicableCharacters());

    return availableChars;
}

From source file:ca.phon.ipamap.IpaMap.java

private static void generateMissingGrids(IpaGrids grids) {
    // create a set of characters defined in the xml file
    final Set<Character> supportedChars = new HashSet<Character>();
    for (Grid g : grids.getGrid()) {
        for (Cell c : g.getCell()) {
            String cellData = c.getText();
            cellData = cellData.replaceAll("\\u25cc", "");
            supportedChars.add(cellData.charAt(0));
        }
    }

    final IPATokens tokens = IPATokens.getSharedInstance();

    // generate 'Other consonants' section
    final Set<Character> cSet = tokens.getCharactersForType(IPATokenType.CONSONANT);
    cSet.addAll(tokens.getCharactersForType(IPATokenType.GLIDE));
    final int w = 2;
    final int h = 2;
    final int maxX = 40;

    cSet.removeAll(supportedChars);
    if (cSet.size() > 0) {
        final Grid cGrid = generateGrid(cSet, "Other Consonants", "", "", w, h, maxX);
        grids.getGrid().add(cGrid);
    }

    // generate Other Vowels
    final Set<Character> vSet = tokens.getCharactersForType(IPATokenType.VOWEL);
    vSet.removeAll(supportedChars);
    if (vSet.size() > 0) {
        final Grid vGrid = generateGrid(vSet, "Other Vowels", "", "", w, h, maxX);
        grids.getGrid().add(vGrid);
    }

    // prefix diacritics
    final Set<Character> pdSet = tokens.getCharactersForType(IPATokenType.PREFIX_DIACRITIC);
    pdSet.removeAll(supportedChars);
    if (pdSet.size() > 0) {
        final Grid pdGrid = generateGrid(pdSet, "Other Prefix Diacritics", "", "\u25cc", w, h, maxX);
        grids.getGrid().add(pdGrid);
    }

    // suffix diacritics
    final Set<Character> sdSet = tokens.getCharactersForType(IPATokenType.SUFFIX_DIACRITIC);
    sdSet.removeAll(supportedChars);
    if (sdSet.size() > 0) {
        final Grid sdGrid = generateGrid(sdSet, "Other Suffix Diacritics", "\u25cc", "", w, h, maxX);
        grids.getGrid().add(sdGrid);
    }

    // combining diacritics
    final Set<Character> cdSet = tokens.getCharactersForType(IPATokenType.COMBINING_DIACRITIC);
    cdSet.removeAll(supportedChars);
    if (cdSet.size() > 0) {
        final Grid cdGrid = generateGrid(cdSet, "Other Combining Diacritics", "\u25cc", "", w, h, maxX);
        grids.getGrid().add(cdGrid);
    }

    // tone diacritics
    final Set<Character> tSet = tokens.getCharactersForType(IPATokenType.TONE);
    tSet.removeAll(supportedChars);
    if (tSet.size() > 0) {
        final Grid tGrid = generateGrid(tSet, "Other Tone Diacritics", "\u25cc", "", w, h, maxX);
        grids.getGrid().add(tGrid);
    }

    // everything else...
    final Set<Character> everything = new HashSet<Character>(tokens.getCharacterSet());
    everything.removeAll(supportedChars);
    everything.removeAll(cSet);
    everything.removeAll(vSet);
    everything.removeAll(pdSet);
    everything.removeAll(tSet);
    everything.removeAll(sdSet);
    everything.removeAll(cdSet);

    if (everything.size() > 0) {
        final Grid eGrid = generateGrid(everything, "Other Symbols", "", "", w, h, maxX);
        grids.getGrid().add(eGrid);
    }
}

From source file:com.stratio.qa.specs.GivenGSpec.java

/**
 * Checks if there are any unused nodes in the cluster and returns the IP of one of them.
 * REQUIRES A PREVIOUSLY-ESTABLISHED SSH CONNECTION TO DCOS-CLI TO WORK
 *
 * @param hosts: list of IPs that will be investigated
 * @param envVar: environment variable name
 * @throws Exception
 *
 */
@Given("^I save the IP of an unused node in hosts '(.+?)' in the in environment variable '(.+?)'?$")
public void getUnusedNode(String hosts, String envVar) throws Exception {
    Set<String> hostList = new HashSet(Arrays.asList(hosts.split(",")));

    //Get the list of currently used hosts
    executeCommand("dcos task | awk '{print $2}'", "foo", 0, "bar", null);
    String results = commonspec.getRemoteSSHConnection().getResult();
    Set<String> usedHosts = new HashSet(Arrays.asList(results.replaceAll("\r", "").split("\n")));

    //We get the nodes not being used
    hostList.removeAll(usedHosts);

    if (hostList.size() == 0) {
        throw new IllegalStateException("No unused nodes in the cluster.");
    } else {
        //Pick the first available node
        ThreadProperty.set(envVar, hostList.iterator().next());
    }
}

From source file:com.ggvaidya.scinames.dataset.BinomialChangesSceneController.java

private void calculateAllBinomialChanges() {
    potentialChanges.clear();
    changesByPotentialChange.clear();

    Dataset prevDataset = null;
    for (Dataset ds : project.getDatasets()) {
        if (prevDataset == null) {
            prevDataset = ds;
            continue;
        }

        // Step 1. Figure out which binomial names were added and removed.
        Set<Name> binomialNamesInPrev = prevDataset.getRecognizedNames(project).flatMap(n -> n.asBinomial())
                .collect(Collectors.toSet());
        Set<Name> binomialNamesInCurrent = ds.getRecognizedNames(project).flatMap(n -> n.asBinomial())
                .collect(Collectors.toSet());

        Set<Name> namesAdded = new HashSet<>(binomialNamesInCurrent);
        namesAdded.removeAll(binomialNamesInPrev);

        Set<Name> namesDeleted = new HashSet<>(binomialNamesInPrev);
        namesDeleted.removeAll(binomialNamesInCurrent);

        // Step 2. Map all changes involving binomial name changes to the
        // binomial names they involve.
        // 
        // Note that this means deliberately skipping changes that *don't* affect
        // binomial composition, such as if a form or variety is deleted but that
        // doesn't result in the binomial name changing.
        List<Change> datasetChanges = ds.getChanges(project).collect(Collectors.toList());
        Map<Name, Set<Change>> changesByBinomialName = new HashMap<>();

        for (Change ch : datasetChanges) {
            Set<Name> changeNames = ch.getAllNames();
            Set<Name> changeBinomialNames = changeNames.stream().flatMap(n -> n.asBinomial())
                    .collect(Collectors.toSet());

            boolean involvesAddedNames = changeBinomialNames.stream().anyMatch(n -> namesAdded.contains(n));
            boolean involvesDeletedNames = changeBinomialNames.stream().anyMatch(n -> namesDeleted.contains(n));

            if (involvesAddedNames || involvesDeletedNames) {
                // Oh goody, involves one of our binomial names.
                //
                // Record all the changes by binomial name
                for (Name binomialName : changeBinomialNames) {
                    if (!changesByBinomialName.containsKey(binomialName))
                        changesByBinomialName.put(binomialName, new HashSet<>());

                    changesByBinomialName.get(binomialName).add(ch);
                }

            } else {
                // This change is an error or involves non-binomial names only.
                // Ignore!
            }
        }

        // Step 3. Convert the additions and deletions into potential changes,
        // based on the changes they include.
        Set<Name> namesChanged = new HashSet<>(namesAdded);
        namesChanged.addAll(namesDeleted);

        Set<Change> changesSummarized = new HashSet<>();

        for (Name n : namesChanged) {
            Set<Change> allChangesAssociatedWithName = changesByBinomialName.get(n);

            // TODO: am I sure this is being handled correctly?
            if (allChangesAssociatedWithName == null)
                continue;

            Set<Change> changes = allChangesAssociatedWithName.stream()
                    // Don't summarize the same change into multiple changes
                    // (e.g. if A + B -> C, we don't want this to turn up three times,
                    //  under 'A', 'B' and 'C')
                    .filter(ch -> !changesSummarized.contains(ch)).collect(Collectors.toSet());

            // No changes left? Skip this name!
            if (changes.isEmpty())
                continue;

            changesSummarized.addAll(changes);

            PotentialChange potentialChange = new PotentialChange(ds,
                    (namesAdded.contains(n) ? ChangeType.ADDITION : ChangeType.DELETION),
                    (namesAdded.contains(n) ? Stream.empty() : Stream.of(n)),
                    (namesAdded.contains(n) ? Stream.of(n) : Stream.empty()),
                    BinomialChangesSceneController.class, "Created from " + changes.size() + " changes: "
                            + changes.stream().map(ch -> ch.toString()).collect(Collectors.joining(";")));

            // Now, by default, the potential change writes in a long creation note, but
            // we don't want that, do we?
            potentialChange.getProperties().put("created", potentialChange.getNote().orElse(""));
            potentialChange.getProperties().remove("note");

            Set<ChangeType> changeTypes = new HashSet<>();

            for (Change ch : changes) {
                changeTypes.add(ch.getType());

                potentialChange.fromProperty().addAll(ch.getFrom());
                potentialChange.toProperty().addAll(ch.getTo());

                Optional<String> currentNote = potentialChange.getNote();
                Optional<String> changeNote = ch.getNote();

                if (currentNote.isPresent() && changeNote.isPresent()) {
                    potentialChange.noteProperty().set(currentNote.get() + "; " + changeNote.get());

                } else if (!currentNote.isPresent() && changeNote.isPresent()) {
                    potentialChange.noteProperty().set(changeNote.get());

                } else {
                    // Nothing to get hung about.
                }
            }

            // Finally, figure out this potential change's type.
            if (changeTypes.size() == 1)
                potentialChange.typeProperty().set(changeTypes.iterator().next());
            else {
                potentialChange.typeProperty().set(ChangeType.COMPLEX);
            }

            // All done!
            potentialChanges.add(potentialChange);
            changesByPotentialChange.put(potentialChange, changes);
        }

        // Ready for next!
        prevDataset = ds;
    }
}

From source file:de.powerstaff.business.service.impl.WrongDataServiceImpl.java

private void processFreelancer(File aReportFile) throws FileNotFoundException, ParseException {

    File theDBOhneProfil = new File(aReportFile, "Freiberufler_mit_Code_ohne_Profil.csv");
    File theFreelancerOhneNewsletter = new File(aReportFile, "Freiberufler_ohne_Newsletter.csv");
    File theFreelancerMitHomepageOhneKontakt = new File(aReportFile,
            "Freiberufler_mit_Homepage_ohne_Kontakt.csv");
    File theFreelancerForNewsletter = new File(aReportFile, "Freiberufler_für_Newsletter.csv");
    File theProfileOhneDB = new File(aReportFile, "Profile_ohne_Datenbankeintrag.csv");
    File theProfileDoppelterCode = new File(aReportFile, "Profile_Kodierung_doppelt.csv");

    PrintWriter theDBOhneProfilWriter = null;
    PrintWriter theFreelancerOhneNewsletterWriter = null;
    PrintWriter theFreelancerMitHomepageOhneKontaktWriter = null;
    PrintWriter theFreelancerForNewsletterWriter = null;
    PrintWriter theProfileOhneDBWriter = null;
    PrintWriter theProfileDoppelterCodeWriter = null;

    FreelancerBackingBeanDataModel theModel = new FreelancerBackingBeanDataModel();

    try {

        theProfileDoppelterCodeWriter = new PrintWriter(theProfileDoppelterCode);

        theDBOhneProfilWriter = new PrintWriter(theDBOhneProfil);
        theFreelancerOhneNewsletterWriter = new PrintWriter(theFreelancerOhneNewsletter);
        theFreelancerMitHomepageOhneKontaktWriter = new PrintWriter(theFreelancerMitHomepageOhneKontakt);
        theFreelancerForNewsletterWriter = new PrintWriter(theFreelancerForNewsletter);
        theProfileOhneDBWriter = new PrintWriter(theProfileOhneDB);

        theDBOhneProfilWriter.println("Kodierung;Name;Vorname;Kreditor");
        theFreelancerOhneNewsletterWriter.println("Kodierung;Name;Vorname;Mail");
        theFreelancerMitHomepageOhneKontaktWriter.println("Kodierung;Name;Vorname;Homepage");
        theFreelancerForNewsletterWriter.println(
                "Krzel;Name;Vorname;Titel;eMail;Eintrag in Kreditor;Verfgbarkeit;Homepage;letzter Kontakt;Status;Xing;Gulp");
        theProfileOhneDBWriter.println("Kodierung;Dateinamen");
        theProfileDoppelterCodeWriter.println("Kodierung;Dateinamen");

        boolean newsletterEnabled = systemParameterService.isNewsletterEnabled();
        Set<String> theMails = new HashSet<String>();
        Date theStartDate = null;

        DateFormat theDateFormat = new SimpleDateFormat("dd.MM.yyyy");

        if (newsletterEnabled) {
            theStartDate = theDateFormat.parse(systemParameterService.getStartDateForNotInNewsletter());

            for (NewsletterMail theMail : websiteDao.getConfirmedMails()) {
                theMails.add(theMail.getMail().toLowerCase());
            }

        }

        Session theSession = sessionFactory.getCurrentSession();
        int theFetchSize = 100;
        int theLogCount = theFetchSize * 10;

        Query theQuery = theSession.createQuery("from Freelancer");
        theQuery.setFetchSize(theFetchSize);
        ScrollableResults theResults = theQuery.scroll(ScrollMode.FORWARD_ONLY);
        int counter = 0;

        Set<String> theKnownCodes = new HashSet<String>();

        while (theResults.next()) {
            Freelancer theFreelancer = (Freelancer) theResults.get(0);

            String theCode = theFreelancer.getCode();
            if (!StringUtils.isEmpty(theCode)) {
                theCode = theCode.toLowerCase();
                theKnownCodes.add(theCode);

                Set<File> theFiles = fsCache.getFilesForCode(theCode);
                if ((theFiles == null || theFiles.size() == 0)) {
                    theDBOhneProfilWriter.println(theCode + ";" + saveString(theFreelancer.getName1()) + ";"
                            + saveString(theFreelancer.getName2()) + ";"
                            + saveString(theFreelancer.getKreditorNr()));
                }
            }

            List<FreelancerContact> theMailContacts = theFreelancer.getEMailContacts();
            List<FreelancerContact> theWebContacts = theFreelancer.getWebContacts();

            Date theLastContact = theFreelancer.getLastContactDate();

            if (!theFreelancer.isContactforbidden()) {

                String theMail = null;
                for (FreelancerContact theContact : theMailContacts) {
                    if (StringUtils.isEmpty(theMail)
                            && "eMail".equalsIgnoreCase(theContact.getType().getDescription())) {
                        theMail = theContact.getValue();
                    }
                }
                String theWeb = "";
                for (FreelancerContact theContact : theWebContacts) {
                    if (StringUtils.isEmpty(theWeb)
                            && "Web".equalsIgnoreCase(theContact.getType().getDescription())) {
                        theWeb = theContact.getValue();
                    }
                }
                String theGulp = "";
                for (FreelancerContact theContact : theWebContacts) {
                    if (StringUtils.isEmpty(theWeb)
                            && "Gulp".equalsIgnoreCase(theContact.getType().getDescription())) {
                        theGulp = theContact.getValue();
                    }
                }

                String theXing = "";
                for (FreelancerContact theContact : theWebContacts) {
                    if (StringUtils.isEmpty(theWeb)
                            && "Xing".equalsIgnoreCase(theContact.getType().getDescription())) {
                        theXing = theContact.getValue();
                    }
                }

                String theAvailable = "";
                Date theAvailability = theFreelancer.getAvailabilityAsDate();
                if (theAvailability != null) {
                    theAvailable = theDateFormat.format(theAvailability);
                }

                theFreelancerForNewsletterWriter.print(saveString(theFreelancer.getCode()));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter.print(saveString(theFreelancer.getName1()));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter.print(saveString(theFreelancer.getName2()));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter.print(saveString(theFreelancer.getTitel()));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter.print(saveString(theMail));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter.print(saveString(theFreelancer.getKreditorNr()));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter.print(saveString(theAvailable));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter.print(saveString(theWeb));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter.print(saveString(theLastContact));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter
                        .print(saveString(theModel.getStatusAsString(theFreelancer.getStatus())));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter.print(saveString(theXing));
                theFreelancerForNewsletterWriter.print(";");
                theFreelancerForNewsletterWriter.print(saveString(theGulp));
                theFreelancerForNewsletterWriter.println();
            }

            if (newsletterEnabled) {

                if (theLastContact != null && !theFreelancer.isContactforbidden()) {

                    String theMail = "";

                    boolean hasMail = false;
                    for (FreelancerContact theContact : theMailContacts) {
                        theMail = theContact.getValue();
                        if (theMails.contains(theMail.toLowerCase())) {
                            hasMail = true;
                        }
                    }

                    if (!hasMail) {
                        theFreelancerOhneNewsletterWriter.println(theFreelancer.getCode() + ";"
                                + theFreelancer.getName1() + ";" + theFreelancer.getName2() + ";" + theMail);
                    }
                }
            }

            if (theLastContact == null) {

                boolean hasHomepage = false;
                String theHomepage = null;
                for (FreelancerContact theContact : theWebContacts) {
                    theHomepage = theContact.getValue();
                    hasHomepage = true;
                }

                if (hasHomepage) {
                    theFreelancerMitHomepageOhneKontaktWriter.println(theFreelancer.getCode() + ";"
                            + theFreelancer.getName1() + ";" + theFreelancer.getName2() + ";" + theHomepage);
                }

            }

            if (counter % theLogCount == 0) {
                LOGGER.info("Processing record " + counter);
            }

            if (counter % theFetchSize == 0) {

                LOGGER.debug("Flushing session");
                theSession.clear();
            }
            counter++;
        }

        Set<String> theCodesFromFiles = new HashSet<String>();
        theCodesFromFiles.addAll(fsCache.getKnownCodes());
        for (String theCode : theCodesFromFiles) {
            Set<File> theFiles = fsCache.getFilesForCode(theCode);
            if (theFiles != null && theFiles.size() > 1) {
                // Duplicate code
                StringBuilder theBuilder = new StringBuilder();
                for (File theFile : theFiles) {
                    if (theBuilder.length() > 0) {
                        theBuilder.append(";");
                    }
                    theBuilder.append(theFile.toString());
                }
                theProfileDoppelterCodeWriter.println(theCode + ";" + theBuilder);
            }
        }

        theCodesFromFiles.removeAll(theKnownCodes);

        for (String theCode : theCodesFromFiles) {
            Set<File> theFiles = fsCache.getFilesForCode(theCode);
            if (theFiles != null) {
                for (File theFile : theFiles) {
                    theProfileOhneDBWriter.println(theCode + ";" + theFile);
                }
            }
        }
    } catch (Exception e) {
        LOGGER.error("Error processing freelancer", e);
    } finally {
        IOUtils.closeQuietly(theDBOhneProfilWriter);
        IOUtils.closeQuietly(theFreelancerOhneNewsletterWriter);
        IOUtils.closeQuietly(theFreelancerMitHomepageOhneKontaktWriter);
        IOUtils.closeQuietly(theFreelancerForNewsletterWriter);
        IOUtils.closeQuietly(theProfileOhneDBWriter);
        IOUtils.closeQuietly(theProfileDoppelterCodeWriter);
    }
}

From source file:beast.evolution.tree.SimpleRandomTree.java

public void doTheWork() {
    // find taxon sets we are dealing with
    taxonSets = new ArrayList<>();
    m_bounds = new ArrayList<>();
    distributions = new ArrayList<>();
    taxonSetIDs = new ArrayList<>();
    List<Boolean> onParent = new ArrayList<>();
    lastMonophyletic = 0;

    if (taxaInput.get() != null) {
        sTaxa.addAll(taxaInput.get().getTaxaNames());
    } else {
        sTaxa.addAll(m_taxonset.get().asStringList());
    }

    // pick up constraints from outputs, m_inititial input tree and output tree, if any
    List<MRCAPrior> calibrations = new ArrayList<MRCAPrior>();
    calibrations.addAll(calibrationsInput.get());

    // pick up constraints in m_initial tree
    for (final Object plugin : getOutputs()) {
        if (plugin instanceof MRCAPrior && !calibrations.contains(plugin)) {
            calibrations.add((MRCAPrior) plugin);
        }
    }

    if (m_initial.get() != null) {
        for (final Object plugin : m_initial.get().getOutputs()) {
            if (plugin instanceof MRCAPrior && !calibrations.contains(plugin)) {
                calibrations.add((MRCAPrior) plugin);
            }
        }
    }

    for (final MRCAPrior prior : calibrations) {
        final TaxonSet taxonSet = prior.taxonsetInput.get();
        if (taxonSet != null && !prior.onlyUseTipsInput.get()) {
            final Set<String> bTaxa = new LinkedHashSet<>();
            if (taxonSet.asStringList() == null) {
                taxonSet.initAndValidate();
            }
            for (final String sTaxonID : taxonSet.asStringList()) {

                if (!sTaxa.contains(sTaxonID)) {
                    throw new IllegalArgumentException(
                            "Taxon <" + sTaxonID + "> could not be found in list of taxa. Choose one of "
                                    + Arrays.toString(sTaxa.toArray(new String[sTaxa.size()])));
                }
                bTaxa.add(sTaxonID);
            }
            final ParametricDistribution distr = prior.distInput.get();
            final Bound bounds = new Bound();
            if (distr != null) {
                List<BEASTInterface> plugins = new ArrayList<>();
                distr.getPredecessors(plugins);
                for (int i = plugins.size() - 1; i >= 0; i--) {
                    plugins.get(i).initAndValidate();
                }
                try {
                    final double offset = distr.offsetInput.get();
                    bounds.lower = Math.max(distr.inverseCumulativeProbability(0.0) + offset, 0.0);
                    bounds.upper = distr.inverseCumulativeProbability(1.0) + offset;
                    assert bounds.lower <= bounds.upper;
                } catch (MathException e) {
                    Log.warning
                            .println("Could not set bounds in SimpleRandomTree::doTheWork : " + e.getMessage());
                }
            }

            if (prior.isMonophyleticInput.get() || bTaxa.size() == 1) {
                // add any monophyletic constraint
                boolean isDuplicate = false;
                for (int k = 0; k < lastMonophyletic; ++k) {
                    // assert prior.useOriginateInput.get().equals(onParent.get(k)) == (prior.useOriginateInput.get() == onParent.get(k));
                    if (bTaxa.size() == taxonSets.get(k).size() && bTaxa.equals(taxonSets.get(k))
                            && prior.useOriginateInput.get().equals(onParent.get(k))) {
                        if (distr != null) {
                            if (distributions.get(k) == null) {
                                distributions.set(k, distr);
                                m_bounds.set(k, bounds);
                                taxonSetIDs.set(k, prior.getID());
                            }
                        }
                        isDuplicate = true;
                    }
                }
                if (!isDuplicate) {
                    taxonSets.add(lastMonophyletic, bTaxa);
                    distributions.add(lastMonophyletic, distr);
                    onParent.add(lastMonophyletic, prior.useOriginateInput.get());
                    m_bounds.add(lastMonophyletic, bounds);
                    taxonSetIDs.add(lastMonophyletic, prior.getID());
                    lastMonophyletic++;
                }
            } else {
                // only calibrations with finite bounds are added
                if (!Double.isInfinite(bounds.lower) || !Double.isInfinite(bounds.upper)) {
                    taxonSets.add(bTaxa);
                    distributions.add(distr);
                    m_bounds.add(bounds);
                    taxonSetIDs.add(prior.getID());
                    onParent.add(prior.useOriginateInput.get());
                }
            }
        }
    }

    if (ICC) {
        for (int i = 0; i < lastMonophyletic; i++) {
            final Set<String> ti = taxonSets.get(i);
            for (int j = i + 1; j < lastMonophyletic; j++) {
                final Set<String> tj = taxonSets.get(j);
                boolean i_in_j = tj.containsAll(ti);
                boolean j_in_i = ti.containsAll(tj);
                if (i_in_j || j_in_i) {
                    boolean ok = true;
                    if (i_in_j && j_in_i) {
                        ok = (boolean) (onParent.get(i)) != (boolean) onParent.get(j);
                    }
                    assert ok : "" + i + ' ' + j + ' ' + ' ' + taxonSetIDs.get(i) + ' ' + taxonSetIDs.get(j);
                } else {
                    Set<String> tmp = new HashSet<>(tj);
                    tmp.retainAll(ti);
                    assert tmp.isEmpty();
                }
            }
        }
    }

    // assume all calibration constraints are Monophyletic
    // TODO: verify that this is a reasonable assumption
    lastMonophyletic = taxonSets.size();

    // sort constraints in increasing set inclusion order, i.e. such that if taxon set i is subset of taxon set j, then i < j
    for (int i = 0; i < lastMonophyletic; i++) {
        for (int j = i + 1; j < lastMonophyletic; j++) {

            final Set<String> taxai = taxonSets.get(i);
            final Set<String> taxaj = taxonSets.get(j);
            Set<String> intersection = new LinkedHashSet<>(taxai);
            intersection.retainAll(taxaj);

            if (intersection.size() > 0) {
                final boolean bIsSubset = taxai.containsAll(taxaj);
                final boolean bIsSubset2 = taxaj.containsAll(taxai);
                // sanity check: make sure either
                // o taxonset1 is subset of taxonset2 OR
                // o taxonset1 is superset of taxonset2 OR
                // o taxonset1 does not intersect taxonset2
                if (!(bIsSubset || bIsSubset2)) {
                    throw new IllegalArgumentException(
                            "333: Don't know how to generate a Random Tree for taxon sets that intersect, "
                                    + "but are not inclusive. Taxonset "
                                    + (taxonSetIDs.get(i) == null ? taxai : taxonSetIDs.get(i)) + " and "
                                    + (taxonSetIDs.get(j) == null ? taxaj : taxonSetIDs.get(j)));
                }
                // swap i & j if b1 subset of b2. If equal sub-sort on 'useOriginate'
                if (bIsSubset && (!bIsSubset2 || (onParent.get(i) && !onParent.get(j)))) {
                    swap(taxonSets, i, j);
                    swap(distributions, i, j);
                    swap(m_bounds, i, j);
                    swap(taxonSetIDs, i, j);
                    swap(onParent, i, j);
                }
            }
        }
    }

    if (ICC) {
        for (int i = 0; i < lastMonophyletic; i++) {
            final Set<String> ti = taxonSets.get(i);
            for (int j = i + 1; j < lastMonophyletic; j++) {
                final Set<String> tj = taxonSets.get(j);
                boolean ok = tj.containsAll(ti);
                if (ok) {
                    ok = !tj.equals(ti) || (!onParent.get(i) && onParent.get(j));
                    assert ok : "" + i + ' ' + j + ' ' + tj.equals(ti) + ' ' + taxonSetIDs.get(i) + ' '
                            + taxonSetIDs.get(j);
                } else {
                    Set<String> tmp = new HashSet<>(tj);
                    tmp.retainAll(ti);
                    assert tmp.isEmpty();
                }
            }
        }
    }

    for (int i = 0; i < lastMonophyletic; i++) {
        if (onParent.get(i)) {
            // make sure it is after constraint on node itself, if such exists
            assert (!(i + 1 < lastMonophyletic && taxonSets.get(i).equals(taxonSets.get(i + 1))
                    && onParent.get(i) && !onParent.get(i + 1)));
            // find something to attach to ....
            // find enclosing clade, if any. pick a non-intersecting clade in the enclosed without an onParent constraint, or one whose
            // onParent constraint is overlapping.
            final Set<String> iTaxa = taxonSets.get(i);
            int j = i + 1;
            Set<String> enclosingTaxa = sTaxa;
            {
                String someTaxon = iTaxa.iterator().next();
                for (/**/; j < lastMonophyletic; j++) {
                    if (taxonSets.get(j).contains(someTaxon)) {
                        enclosingTaxa = taxonSets.get(j);
                        break;
                    }
                }
            }
            final int enclosingIndex = (j == lastMonophyletic) ? j : j;
            Set<String> candidates = new HashSet<>(enclosingTaxa);
            candidates.removeAll(iTaxa);
            Set<Integer> candidateClades = new HashSet<>(5);
            List<String> canTaxa = new ArrayList<>();
            for (String c : candidates) {
                for (int k = enclosingIndex - 1; k >= 0; --k) {
                    if (taxonSets.get(k).contains(c)) {
                        if (!candidateClades.contains(k)) {
                            if (onParent.get(k)) {
                                if (!intersecting(m_bounds.get(k), m_bounds.get(i))) {
                                    break;
                                }
                            } else {
                                if (!(m_bounds.get(k).lower <= m_bounds.get(i).lower)) {
                                    break;
                                }
                            }
                            candidateClades.add(k);
                        }
                        break;
                    }
                    if (k == 0) {
                        canTaxa.add(c);
                    }
                }
            }

            final int sz1 = canTaxa.size();
            final int sz2 = candidateClades.size();

            if (sz1 + sz2 == 0 && i + 1 == enclosingIndex) {
                final Bound ebound = m_bounds.get(enclosingIndex);
                ebound.restrict(m_bounds.get(i));
            } else {
                assert sz1 + sz2 > 0;
                // prefer taxa over clades (less chance of clades useOriginate clashing)
                final int k = Randomizer.nextInt(sz1 > 0 ? sz1 : sz2);
                Set<String> connectTo;
                int insertPoint;
                if (k < sz1) {
                    // from taxa
                    connectTo = new HashSet<>(1);
                    connectTo.add(canTaxa.get(k));
                    insertPoint = i + 1;
                } else {
                    // from clade
                    final Iterator<Integer> it = candidateClades.iterator();
                    for (j = 0; j < k - sz1 - 1; ++j) {
                        it.next();
                    }
                    insertPoint = it.next();
                    connectTo = new HashSet<>(taxonSets.get(insertPoint));
                    insertPoint = Math.max(insertPoint, i) + 1;
                }

                final HashSet<String> cc = new HashSet<String>(connectTo);

                connectTo.addAll(taxonSets.get(i));
                if (!connectTo.equals(enclosingTaxa) || enclosingTaxa == sTaxa) { // equal when clade already exists

                    taxonSets.add(insertPoint, connectTo);
                    distributions.add(insertPoint, distributions.get(i));
                    onParent.add(insertPoint, false);
                    m_bounds.add(insertPoint, m_bounds.get(i));
                    final String tid = taxonSetIDs.get(i);
                    taxonSetIDs.add(insertPoint, tid);
                    lastMonophyletic += 1;
                } else {
                    // we lose distribution i :(
                    final Bound ebound = m_bounds.get(enclosingIndex);
                    ebound.restrict(m_bounds.get(i));
                }
            }
            if (true) {
                taxonSets.set(i, new HashSet<>());
                distributions.set(i, null);
                m_bounds.set(i, new Bound());
                final String tid = taxonSetIDs.get(i);
                if (tid != null) {
                    taxonSetIDs.set(i, "was-" + tid);
                }
            }
        }
    }

    {
        int icur = 0;
        for (int i = 0; i < lastMonophyletic; ++i, ++icur) {
            final Set<String> ti = taxonSets.get(i);
            if (ti.isEmpty()) {
                icur -= 1;
            } else {
                if (icur < i) {
                    taxonSets.set(icur, taxonSets.get(i));
                    distributions.set(icur, distributions.get(i));
                    m_bounds.set(icur, m_bounds.get(i));
                    taxonSetIDs.set(icur, taxonSetIDs.get(i));
                    onParent.set(icur, onParent.get(i));
                }
            }
        }
        taxonSets.subList(icur, lastMonophyletic).clear();
        distributions.subList(icur, lastMonophyletic).clear();
        m_bounds.subList(icur, lastMonophyletic).clear();
        taxonSetIDs.subList(icur, lastMonophyletic).clear();
        onParent.subList(icur, lastMonophyletic).clear();

        lastMonophyletic = icur;
    }

    if (ICC) {
        for (int i = 0; i < lastMonophyletic; i++) {
            final Set<String> ti = taxonSets.get(i);
            for (int j = i + 1; j < lastMonophyletic; j++) {
                final Set<String> tj = taxonSets.get(j);
                boolean ok = tj.containsAll(ti);
                if (ok) {
                    ok = !tj.equals(ti) || (!onParent.get(i) && onParent.get(j));
                    assert ok : "" + i + ' ' + j + ' ' + taxonSetIDs.get(i) + ' ' + taxonSetIDs.get(j);
                } else {
                    Set<String> tmp = new HashSet<>(tj);
                    tmp.retainAll(ti);
                    assert tmp.isEmpty();
                }
            }
        }
    }

    // map parent child relationships between mono clades. nParent[i] is the immediate parent clade of i, if any. An immediate parent is the
    // smallest superset of i, children[i] is a list of all clades which have i as a parent.
    // The last one, standing for the virtual "root" of all monophyletic clades is not associated with any actual clade
    final int[] nParent = new int[lastMonophyletic];
    children = new List[lastMonophyletic + 1];
    for (int i = 0; i < lastMonophyletic + 1; i++) {
        children[i] = new ArrayList<>();
    }
    for (int i = 0; i < lastMonophyletic; i++) {
        int j = i + 1;
        while (j < lastMonophyletic && !taxonSets.get(j).containsAll(taxonSets.get(i))) {
            j++;
        }
        nParent[i] = j;
        children[j].add(i);
    }

    // make sure upper bounds of a child does not exceed the upper bound of its parent
    for (int i = lastMonophyletic - 1; i >= 0; --i) {
        if (nParent[i] < lastMonophyletic) {
            if (m_bounds.get(i).upper > m_bounds.get(nParent[i]).upper) {
                m_bounds.get(i).upper = m_bounds.get(nParent[i]).upper - 1e-100;
                assert m_bounds.get(i).lower <= m_bounds.get(i).upper : i;
            }
        }
    }

    nodeCount = 2 * sTaxa.size() - 1;
    boundPerNode = new Bound[nodeCount];
    distPerNode = new ParametricDistribution[nodeCount];

    buildTree(sTaxa);
    assert nextNodeNr == nodeCount : "" + nextNodeNr + ' ' + nodeCount;

    double bm = branchMeanInput.get();

    if (bm < 0) {
        double maxMean = 0;

        for (ParametricDistribution distr : distPerNode) {
            if (distr != null) {
                double m = distr.getMean();
                if (maxMean < m)
                    maxMean = m;
            }
        }
        if (maxMean > 0) {
            double s = 0;
            for (int i = 2; i <= nodeCount; ++i) {
                s += 1.0 / i;
            }
            bm = s / maxMean;
        }
    }

    double rate = 1 / (bm < 0 ? 1 : bm);
    boolean succ = false;
    int ntries = 6;
    final double epsi = 0.01 / rate;
    double clamp = 1 - clampInput.get();
    while (!succ && ntries > 0) {
        try {
            succ = setHeights(rate, false, epsi, clamp);
        } catch (ConstraintViolatedException e) {
            throw new RuntimeException("Constraint failed: " + e.getMessage());
        }
        --ntries;
        rate *= 2;
        clamp /= 2;
    }
    if (!succ) {
        try {
            succ = setHeights(rate, true, 0, 0);
        } catch (ConstraintViolatedException e) {
            throw new RuntimeException("Constraint failed: " + e.getMessage());
        }
    }
    assert succ;

    internalNodeCount = sTaxa.size() - 1;
    leafNodeCount = sTaxa.size();

    HashMap<String, Integer> taxonToNR = null;
    // preserve node numbers where possible
    if (m_initial.get() != null) {
        taxonToNR = new HashMap<>();
        for (Node n : m_initial.get().getExternalNodes()) {
            taxonToNR.put(n.getID(), n.getNr());
        }
    }
    // re-assign node numbers
    setNodesNrs(root, 0, new int[1], taxonToNR);

    initArrays();
}