Example usage for java.util List retainAll

List of usage examples for java.util List retainAll

Introduction

On this page you can find usage examples for java.util.List.retainAll.

Prototype

boolean retainAll(Collection<?> c);

Document

Retains only the elements in this list that are contained in the specified collection (optional operation).
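
Before the project examples below, a minimal self-contained sketch of this contract (class and variable names are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

public class RetainAllDemo {
    public static void main(String[] args) {
        List<String> letters = new ArrayList<>(Arrays.asList("a", "b", "c", "d"));

        // Keeps only elements also contained in the argument collection and
        // returns true if the list changed; the receiver's order is preserved.
        boolean changed = letters.retainAll(new HashSet<>(Arrays.asList("d", "b", "x")));

        System.out.println(changed); // true
        System.out.println(letters); // [b, d]

        // Caveat: fixed-size or immutable lists (e.g. from Arrays.asList) throw
        // UnsupportedOperationException if retainAll actually has to remove an
        // element, hence the ArrayList copy above.
    }
}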

Usage

From source file:org.phenotips.export.internal.DataToCellConverter.java

public DataSection prenatalPerinatalHistoryHeader(Set<String> enabledFields) throws Exception {
    String sectionName = "prenatalPerinatalHistory";
    List<String> fields = new ArrayList<>(
            Arrays.asList("gestation", "prenatal_development", "assistedReproduction_fertilityMeds",
                    "assistedReproduction_iui", "ivf", "icsi", "assistedReproduction_surrogacy",
                    "assistedReproduction_donoregg", "assistedReproduction_donorsperm", "apgar1", "apgar5"));
    fields.retainAll(enabledFields);
    Set<String> fieldSet = new HashSet<>(fields);
    this.enabledHeaderIdsBySection.put(sectionName, fieldSet);
    if (fields.isEmpty()) {
        return null;
    }

    DataSection headerSection = new DataSection();

    List<String> apgarFields = new ArrayList<>(Arrays.asList("apgar1", "apgar5"));
    List<String> assistedReproductionFields = new ArrayList<>(Arrays.asList("assistedReproduction_fertilityMeds",
            "assistedReproduction_iui", "ivf", "icsi", "assistedReproduction_surrogacy",
            "assistedReproduction_donoregg", "assistedReproduction_donorsperm"));
    apgarFields.retainAll(fieldSet);
    assistedReproductionFields.retainAll(fieldSet);
    int apgarOffset = apgarFields.size();
    // there used to be a +1 for the offset
    int assistedReproductionOffset = assistedReproductionFields.size();
    int bottomY = (apgarOffset > 0 || assistedReproductionOffset > 0) ? 2 : 1;

    int hX = 0;
    for (String fieldId : fields) {
        DataCell headerCell = new DataCell(
                this.translationManager
                        .translate("phenotips.export.excel.label.prenatalPerinatalHistory." + fieldId),
                hX, bottomY, StyleOption.HEADER);
        headerSection.addCell(headerCell);
        hX++;
    }
    if (apgarOffset > 0) {
        DataCell headerCell = new DataCell(
                this.translationManager
                        .translate("phenotips.export.excel.label.prenatalPerinatalHistory.apgarScore"),
                hX - apgarOffset, 1, StyleOption.HEADER);
        headerSection.addCell(headerCell);
    }
    if (assistedReproductionOffset > 0) {
        DataCell headerCell = new DataCell(
                this.translationManager.translate(
                        "phenotips.export.excel.label.prenatalPerinatalHistory.assistedReproduction"),
                hX - apgarOffset - assistedReproductionOffset, 1, StyleOption.HEADER);
        headerSection.addCell(headerCell);
    }
    DataCell headerCell = new DataCell(
            this.translationManager.translate("phenotips.export.excel.label.prenatalPerinatalHistory"), 0, 0,
            StyleOption.LARGE_HEADER);
    headerCell.addStyle(StyleOption.HEADER);
    headerSection.addCell(headerCell);

    return headerSection;
}
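
The pattern above is worth noting: seeding a mutable ArrayList with the canonical column order and then calling retainAll with the set of enabled fields yields the enabled columns in canonical order, since retainAll preserves the receiver's ordering. A stripped-down sketch of the same idea (field names are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class ColumnOrder {
    static List<String> orderedEnabledColumns(Set<String> enabled) {
        // Canonical column order; retainAll filters it down without reordering.
        List<String> columns = new ArrayList<>(Arrays.asList("gestation", "apgar1", "apgar5"));
        columns.retainAll(enabled);
        return columns;
    }

    public static void main(String[] args) {
        System.out.println(orderedEnabledColumns(new HashSet<>(Arrays.asList("apgar5", "gestation"))));
        // [gestation, apgar5] -- canonical order, not the set's order
    }
}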

From source file:org.opencms.staticexport.CmsAfterPublishStaticExportHandler.java

/**
 * Does the actual static export.<p>
 *
 * @param resources a list of CmsPublishedResource objects to start the static export with
 * @param report an <code>{@link I_CmsReport}</code> instance to print output messages, or <code>null</code> to write messages to the log file
 *       
 * @throws CmsException in case of errors accessing the VFS
 * @throws IOException in case of errors writing to the export output stream
 * @throws ServletException in case of errors accessing the servlet 
 */
public void doExportAfterPublish(List<CmsPublishedResource> resources, I_CmsReport report)
        throws CmsException, IOException, ServletException {

    boolean templatesFound;

    // export must be done in the context of the export user 
    // this will always use the root site
    CmsObject cmsExportObject = OpenCms.initCmsObject(OpenCms.getDefaultUsers().getUserExport());

    List<CmsPublishedResource> resourcesToExport = getRelatedResources(cmsExportObject, resources);
    // first export all non-template resources
    templatesFound = exportNonTemplateResources(cmsExportObject, resourcesToExport, report);
    LOG.warn("finished exporting non-template resources. ");

    // export template resources (check "plainoptimization" setting)
    if ((templatesFound) || (!OpenCms.getStaticExportManager().getQuickPlainExport())) {
        CmsStaticExportManager manager = OpenCms.getStaticExportManager();

        // build resource filter set
        Set<String> resourceFilter = new HashSet<String>();
        for (CmsPublishedResource pubResource : resourcesToExport) {
            String rfsName = manager.getRfsName(cmsExportObject, pubResource.getRootPath());
            resourceFilter.add(rfsName.substring(manager.getRfsPrefixForRfsName(rfsName).length()));
        }

        long timestamp = 0;
        List<String> publishedTemplateResources;
        boolean newTemplateLinksFound;
        int linkMode = CmsStaticExportManager.EXPORT_LINK_WITHOUT_PARAMETER;
        do {
            // get all template resources which are potential candidates for a static export
            publishedTemplateResources = cmsExportObject.readStaticExportResources(linkMode, timestamp);
            if (publishedTemplateResources == null) {
                break;
            }
            newTemplateLinksFound = publishedTemplateResources.size() > 0;
            if (newTemplateLinksFound) {
                if (linkMode == CmsStaticExportManager.EXPORT_LINK_WITHOUT_PARAMETER) {
                    // first loop, switch mode to parameter links, leave the timestamp unchanged
                    linkMode = CmsStaticExportManager.EXPORT_LINK_WITH_PARAMETER;
                    // filter without parameter
                    publishedTemplateResources.retainAll(resourceFilter);
                } else {
                    // second and subsequent loops, only look for links not already exported
                    // this can only be the case for a link with parameters 
                    // that was present on a page also generated with parameters
                    timestamp = System.currentTimeMillis();
                    // filter with parameter
                    Iterator<String> itPubTemplates = publishedTemplateResources.iterator();
                    while (itPubTemplates.hasNext()) {
                        String rfsName = itPubTemplates.next();
                        if (!resourceFilter.contains(rfsName.substring(0, rfsName.lastIndexOf('_')))) {
                            itPubTemplates.remove();
                        }
                    }
                }
                // leave if no template left
                if (publishedTemplateResources.isEmpty()) {
                    break;
                }
                // export
                LOG.warn("exporting template resources. ");
                exportTemplateResources(cmsExportObject, publishedTemplateResources, report);
            }
            // if no new template links were found we are finished
        } while (newTemplateLinksFound);
    }
}
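
Note why the second loop above uses an explicit Iterator with remove() instead of retainAll: the parameterized links are filtered on a derived key (the rfsName with its trailing '_<suffix>' stripped), not on element equality, which retainAll cannot express. On Java 8+ the same filter can be written with removeIf; a hedged sketch under that assumption, with illustrative paths:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class TemplateFilter {
    public static void main(String[] args) {
        Set<String> resourceFilter = new HashSet<>(Arrays.asList("/sites/a", "/sites/b"));
        List<String> published = new ArrayList<>(
                Arrays.asList("/sites/a_1", "/sites/b_2", "/sites/c_3"));

        // Equivalent of the iterator loop: keep entries whose base name
        // (everything before the last '_') appears in the filter set.
        published.removeIf(rfsName ->
                !resourceFilter.contains(rfsName.substring(0, rfsName.lastIndexOf('_'))));

        System.out.println(published); // [/sites/a_1, /sites/b_2]
    }
}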

From source file:org.ejbca.core.model.era.RaMasterApiSessionBean.java

@SuppressWarnings("unchecked")
@Override
public RaCertificateSearchResponse searchForCertificates(AuthenticationToken authenticationToken,
        RaCertificateSearchRequest request) {
    final RaCertificateSearchResponse response = new RaCertificateSearchResponse();
    final List<Integer> authorizedLocalCaIds = new ArrayList<>(
            caSession.getAuthorizedCaIds(authenticationToken));
    // Only search a subset of the requested CAs if requested
    if (!request.getCaIds().isEmpty()) {
        authorizedLocalCaIds.retainAll(request.getCaIds());
    }
    final List<String> issuerDns = new ArrayList<>();
    for (final int caId : authorizedLocalCaIds) {
        try {
            final String issuerDn = CertTools
                    .stringToBCDNString(StringTools.strip(caSession.getCAInfoInternal(caId).getSubjectDN()));
            issuerDns.add(issuerDn);
        } catch (CADoesntExistsException e) {
            log.warn("CA went missing during search operation. " + e.getMessage());
        }
    }
    if (issuerDns.isEmpty()) {
        // Empty response since there were no authorized CAs
        if (log.isDebugEnabled()) {
            log.debug("Client '" + authenticationToken
                    + "' was not authorized to any of the requested CAs and the search request will be dropped.");
        }
        return response;
    }
    // Check Certificate Profile authorization
    final List<Integer> authorizedCpIds = new ArrayList<>(
            certificateProfileSession.getAuthorizedCertificateProfileIds(authenticationToken, 0));
    final boolean accessAnyCpAvailable = authorizedCpIds
            .containsAll(certificateProfileSession.getCertificateProfileIdToNameMap().keySet());
    if (!request.getCpIds().isEmpty()) {
        authorizedCpIds.retainAll(request.getCpIds());
    }
    if (authorizedCpIds.isEmpty()) {
        // Empty response since there were no authorized Certificate Profiles
        if (log.isDebugEnabled()) {
            log.debug("Client '" + authenticationToken
                    + "' was not authorized to any of the requested CPs and the search request will be dropped.");
        }
        return response;
    }
    // Check End Entity Profile authorization
    final Collection<Integer> authorizedEepIds = new ArrayList<>(endEntityProfileSession
            .getAuthorizedEndEntityProfileIds(authenticationToken, AccessRulesConstants.VIEW_END_ENTITY));
    final boolean accessAnyEepAvailable = authorizedEepIds
            .containsAll(endEntityProfileSession.getEndEntityProfileIdToNameMap().keySet());
    if (!request.getEepIds().isEmpty()) {
        authorizedEepIds.retainAll(request.getEepIds());
    }
    if (authorizedEepIds.isEmpty()) {
        // Empty response since there were no authorized End Entity Profiles
        if (log.isDebugEnabled()) {
            log.debug("Client '" + authenticationToken
                    + "' was not authorized to any of the requested EEPs and the search request will be dropped.");
        }
        return response;
    }
    final String subjectDnSearchString = request.getSubjectDnSearchString();
    final String subjectAnSearchString = request.getSubjectAnSearchString();
    final String usernameSearchString = request.getUsernameSearchString();
    final String serialNumberSearchStringFromDec = request.getSerialNumberSearchStringFromDec();
    final String serialNumberSearchStringFromHex = request.getSerialNumberSearchStringFromHex();
    final StringBuilder sb = new StringBuilder(
            "SELECT a.fingerprint FROM CertificateData a WHERE (a.issuerDN IN (:issuerDN))");
    if (!subjectDnSearchString.isEmpty() || !subjectAnSearchString.isEmpty() || !usernameSearchString.isEmpty()
            || !serialNumberSearchStringFromDec.isEmpty() || !serialNumberSearchStringFromHex.isEmpty()) {
        sb.append(" AND (");
        boolean firstAppended = false;
        if (!subjectDnSearchString.isEmpty()) {
            sb.append("a.subjectDN LIKE :subjectDN");
            firstAppended = true;
        }
        if (!subjectAnSearchString.isEmpty()) {
            if (firstAppended) {
                sb.append(" OR ");
            } else {
                firstAppended = true;
            }
            sb.append("a.subjectAltName LIKE :subjectAltName");
        }
        if (!usernameSearchString.isEmpty()) {
            if (firstAppended) {
                sb.append(" OR ");
            } else {
                firstAppended = true;
            }
            sb.append("a.username LIKE :username");
        }
        if (!serialNumberSearchStringFromDec.isEmpty()) {
            if (firstAppended) {
                sb.append(" OR ");
            } else {
                firstAppended = true;
            }
            sb.append("a.serialNumber LIKE :serialNumberDec");
        }
        if (!serialNumberSearchStringFromHex.isEmpty()) {
            if (firstAppended) {
                sb.append(" OR ");
            }
            sb.append("a.serialNumber LIKE :serialNumberHex");
        }
        sb.append(")");
    }
    // NOTE: notBefore is not indexed.. we might want to disallow such search.
    if (request.isIssuedAfterUsed()) {
        sb.append(" AND (a.notBefore > :issuedAfter)");
    }
    if (request.isIssuedBeforeUsed()) {
        sb.append(" AND (a.notBefore < :issuedBefore)");
    }
    if (request.isExpiresAfterUsed()) {
        sb.append(" AND (a.expireDate > :expiresAfter)");
    }
    if (request.isExpiresBeforeUsed()) {
        sb.append(" AND (a.expireDate < :expiresBefore)");
    }
    // NOTE: revocationDate is not indexed.. we might want to disallow such search.
    if (request.isRevokedAfterUsed()) {
        sb.append(" AND (a.revocationDate > :revokedAfter)");
    }
    if (request.isRevokedBeforeUsed()) {
        sb.append(" AND (a.revocationDate < :revokedBefore)");
    }
    if (!request.getStatuses().isEmpty()) {
        sb.append(" AND (a.status IN (:status))");
        if ((request.getStatuses().contains(CertificateConstants.CERT_REVOKED)
                || request.getStatuses().contains(CertificateConstants.CERT_ARCHIVED))
                && !request.getRevocationReasons().isEmpty()) {
            sb.append(" AND (a.revocationReason IN (:revocationReason))");
        }
    }
    // Don't constrain results to certain certificate profiles if root access is available and "any" CP is requested
    if (!accessAnyCpAvailable || !request.getCpIds().isEmpty()) {
        sb.append(" AND (a.certificateProfileId IN (:certificateProfileId))");
    }
    // Don't constrain results to certain end entity profiles if root access is available and "any" EEP is requested
    if (!accessAnyEepAvailable || !request.getEepIds().isEmpty()) {
        sb.append(" AND (a.endEntityProfileId IN (:endEntityProfileId))");
    }
    final Query query = entityManager.createQuery(sb.toString());
    query.setParameter("issuerDN", issuerDns);
    if (!accessAnyCpAvailable || !request.getCpIds().isEmpty()) {
        query.setParameter("certificateProfileId", authorizedCpIds);
    }
    if (!accessAnyEepAvailable || !request.getEepIds().isEmpty()) {
        query.setParameter("endEntityProfileId", authorizedEepIds);
    }
    if (log.isDebugEnabled()) {
        log.debug(" issuerDN: " + Arrays.toString(issuerDns.toArray()));
        if (!accessAnyCpAvailable || !request.getCpIds().isEmpty()) {
            log.debug(" certificateProfileId: " + Arrays.toString(authorizedCpIds.toArray()));
        } else {
            log.debug(" certificateProfileId: Any (even deleted) profile(s) due to root access.");
        }
        if (!accessAnyEepAvailable || !request.getEepIds().isEmpty()) {
            log.debug(" endEntityProfileId: " + Arrays.toString(authorizedEepIds.toArray()));
        } else {
            log.debug(" endEntityProfileId: Any (even deleted) profile(s) due to root access.");
        }
    }
    if (!subjectDnSearchString.isEmpty()) {
        if (request.isSubjectDnSearchExact()) {
            query.setParameter("subjectDN", subjectDnSearchString);
        } else {
            query.setParameter("subjectDN", "%" + subjectDnSearchString + "%");
        }
    }
    if (!subjectAnSearchString.isEmpty()) {
        if (request.isSubjectAnSearchExact()) {
            query.setParameter("subjectAltName", subjectAnSearchString);
        } else {
            query.setParameter("subjectAltName", "%" + subjectAnSearchString + "%");
        }
    }
    if (!usernameSearchString.isEmpty()) {
        if (request.isUsernameSearchExact()) {
            query.setParameter("username", usernameSearchString);
        } else {
            query.setParameter("username", "%" + usernameSearchString + "%");
        }
    }
    if (!serialNumberSearchStringFromDec.isEmpty()) {
        query.setParameter("serialNumberDec", serialNumberSearchStringFromDec);
        if (log.isDebugEnabled()) {
            log.debug(" serialNumberDec: " + serialNumberSearchStringFromDec);
        }
    }
    if (!serialNumberSearchStringFromHex.isEmpty()) {
        query.setParameter("serialNumberHex", serialNumberSearchStringFromHex);
        if (log.isDebugEnabled()) {
            log.debug(" serialNumberHex: " + serialNumberSearchStringFromHex);
        }
    }
    if (request.isIssuedAfterUsed()) {
        query.setParameter("issuedAfter", request.getIssuedAfter());
    }
    if (request.isIssuedBeforeUsed()) {
        query.setParameter("issuedBefore", request.getIssuedBefore());
    }
    if (request.isExpiresAfterUsed()) {
        query.setParameter("expiresAfter", request.getExpiresAfter());
    }
    if (request.isExpiresBeforeUsed()) {
        query.setParameter("expiresBefore", request.getExpiresBefore());
    }
    if (request.isRevokedAfterUsed()) {
        query.setParameter("revokedAfter", request.getRevokedAfter());
    }
    if (request.isRevokedBeforeUsed()) {
        query.setParameter("revokedBefore", request.getRevokedBefore());
    }
    if (!request.getStatuses().isEmpty()) {
        query.setParameter("status", request.getStatuses());
        if ((request.getStatuses().contains(CertificateConstants.CERT_REVOKED)
                || request.getStatuses().contains(CertificateConstants.CERT_ARCHIVED))
                && !request.getRevocationReasons().isEmpty()) {
            query.setParameter("revocationReason", request.getRevocationReasons());
        }
    }
    final int maxResults = Math.min(getGlobalCesecoreConfiguration().getMaximumQueryCount(),
            request.getMaxResults());
    query.setMaxResults(maxResults);
    /* Try to use the non-portable hint (depends on DB and JDBC driver) to specify how long in milliseconds the query may run. Possible behaviors:
     * - The hint is ignored
     * - A QueryTimeoutException is thrown
     * - A PersistenceException is thrown (and the transaction, which we don't have here, is marked for roll-back)
     */
    final long queryTimeout = getGlobalCesecoreConfiguration().getMaximumQueryTimeout();
    if (queryTimeout > 0L) {
        query.setHint("javax.persistence.query.timeout", String.valueOf(queryTimeout));
    }
    final List<String> fingerprints;
    try {
        fingerprints = query.getResultList();
        for (final String fingerprint : fingerprints) {
            response.getCdws().add(certificateStoreSession.getCertificateData(fingerprint));
        }
        response.setMightHaveMoreResults(fingerprints.size() == maxResults);
        if (log.isDebugEnabled()) {
            log.debug("Certificate search query: " + sb.toString() + " LIMIT " + maxResults + " \u2192 "
                    + fingerprints.size() + " results. queryTimeout=" + queryTimeout + "ms");
        }
    } catch (QueryTimeoutException e) {
        // Query.toString() does not return the executed SQL query, just a Java object hash. If Hibernate is being used we can get it using:
        // query.unwrap(org.hibernate.Query.class).getQueryString()
        // We don't have access to hibernate when building this class though, all querying should be moved to the ejbca-entity package.
        // See ECA-5341
        String queryString = e.getQuery().toString();
        //            try {
        //                queryString = e.getQuery().unwrap(org.hibernate.Query.class).getQueryString();
        //            } catch (PersistenceException pe) {
        //                log.debug("Query.unwrap(org.hibernate.Query.class) is not supported by JPA provider");
        //            }
        log.info("Requested search query by " + authenticationToken + " took too long. Query was '"
                + queryString + "'. " + e.getMessage());
        response.setMightHaveMoreResults(true);
    } catch (PersistenceException e) {
        log.info("Requested search query by " + authenticationToken + " failed, possibly due to timeout. "
                + e.getMessage());
        response.setMightHaveMoreResults(true);
    }
    return response;
}
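
One detail worth copying from this method: each authorized-ID collection is wrapped in new ArrayList<>(...) before retainAll is applied. retainAll mutates its receiver, so intersecting directly on a collection returned by a session bean could corrupt shared or cached state (or fail outright on an unmodifiable list). A minimal sketch of the defensive-copy pattern; the session call here is a hypothetical stand-in:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class AuthorizedIds {
    // Stand-in for a session bean call that may return a shared/unmodifiable list.
    static List<Integer> getAuthorizedCaIds() {
        return Collections.unmodifiableList(Arrays.asList(1, 2, 3, 4));
    }

    static List<Integer> authorizedSubset(List<Integer> requested) {
        // Copy first: retainAll mutates the receiver and would throw on an
        // unmodifiable list (and would corrupt a cached one).
        List<Integer> ids = new ArrayList<>(getAuthorizedCaIds());
        if (!requested.isEmpty()) {
            ids.retainAll(requested);
        }
        return ids;
    }

    public static void main(String[] args) {
        System.out.println(authorizedSubset(Arrays.asList(2, 4, 9))); // [2, 4]
    }
}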

From source file:edu.stanford.muse.webapp.JSPHelper.java

/**
 * This used to be a VIP method for Muse; it is now superseded by Searcher.java for ePADD.
 * Handles queries for term, sentiment, person, attachment, docNum, timeCluster,
 * etc.
 * note: date range selection is always ANDed
 * if only_apply_to_filtered_docs, looks at emailDocs, i.e. ones selected by
 * the current filter (if there is one)
 * if !only_apply_to_filtered_docs, looks at all docs in archive
 * note: only_apply_to_filtered_docs == true is not honored by lucene lookup
 * by term (callers need to filter by themselves)
 * note2: performance can be improved. e.g., if in AND mode, searches that
 * iterate through documents such as
 * selectDocByTag, getBlobsForAttachments, etc., can take the intermediate
 * resultDocs rather than allDocs.
 * set intersection/union can be done in place to the intermediate
 * resultDocs rather than create a new collection.
 * getDocsForAttachments can be called on the combined result of attachments
 * and attachmentTypes search, rather than individually.
 * note3: should we offer options that let the user choose whether to search
 * only in emails, only in attachments, or both?
 * also, how should we allow variants in combining multiple conditions.
 * there will be work in UI too.
 * note4: the returned resultBlobs may not be tight, i.e., it may include
 * blobs from docs that are not in the returned resultDocs.
 * but for docs that are in resultDocs, it should not include blobs that are
 * not hitting.
 * these extra blobs will not be seen since we only use this info for
 * highlighting blobs in resultDocs.
 */
public static Pair<Collection<Document>, Collection<Blob>> selectDocsWithHighlightAttachments(
        HttpServletRequest request, HttpSession session, boolean only_apply_to_filtered_docs,
        boolean or_not_and) throws UnsupportedEncodingException {
    // below are all the controls for selecting docs 
    String term = request.getParameter("term"); // search term
    String[] contact_ids = request.getParameterValues("contact");
    String[] persons = request.getParameterValues("person");
    String[] attachments = request.getParameterValues("attachment"); // actual attachment name

    String[] attachment_extensions = request.getParameterValues("attachment_extension");

    {
        // if attachment_types specified, parse them and add the values in them to attachment_extensions also
        // types are higher level (video, audio, etc.) and map to more than 1 extension
        String[] attachment_types = request.getParameterValues("attachment_type"); // will be something like ["pdf,doc", "ppt,pptx,key"]
        if (!Util.nullOrEmpty(attachment_types)) {
            // assemble all extensions in a list first
            List<String> list = new ArrayList<>();
            if (!Util.nullOrEmpty(attachment_extensions))
                list.addAll(Arrays.asList(attachment_extensions));

            for (String s : attachment_types)
                list.addAll(Util.tokenize(s, ","));
            // trim all spaces, then convert back to array
            list = list.stream().map(s -> s.trim()).collect(Collectors.toList());
            attachment_extensions = list.toArray(new String[list.size()]);
        }
    }

    String datasetId = request.getParameter("datasetId");
    String[] docIds = request.getParameterValues("docId");
    String[] folders = request.getParameterValues("folder");
    String sortByStr = request.getParameter("sort_by");
    Indexer.SortBy sortBy = Indexer.SortBy.RELEVANCE;
    if (!Util.nullOrEmpty(sortByStr)) {
        if ("relevance".equals(sortByStr.toLowerCase()))
            sortBy = Indexer.SortBy.RELEVANCE;
        else if ("recent".equals(sortByStr.toLowerCase()))
            sortBy = Indexer.SortBy.RECENT_FIRST;
        else if ("chronological".equals(sortByStr.toLowerCase()))
            sortBy = Indexer.SortBy.CHRONOLOGICAL_ORDER;
        else {
            log.warn("Unknown sort by option: " + sortBy);
        }
    }

    // compute date requirements. start/end_date are in yyyy/mm/dd format
    int yy = -1, end_yy = -1, mm = -1, end_mm = -1, dd = -1, end_dd = -1;

    String start_date = request.getParameter("start_date");
    if (!Util.nullOrEmpty(start_date)) {
        String[] ss = start_date.split("/");
        if (ss.length > 0) {
            yy = Util.getIntParam(ss[0], -1);
        }
        if (ss.length > 1) {
            mm = Util.getIntParam(ss[1], -1);
        }
        if (ss.length > 2) {
            dd = Util.getIntParam(ss[2], -1);
        }
    }

    String end_date = request.getParameter("end_date");
    if (!Util.nullOrEmpty(end_date)) {
        String[] ss = end_date.split("/");
        if (ss.length > 0) {
            end_yy = Util.getIntParam(ss[0], -1);
        }
        if (ss.length > 1) {
            end_mm = Util.getIntParam(ss[1], -1);
        }
        if (ss.length > 2) {
            end_dd = Util.getIntParam(ss[2], -1);
        }
    }

    //key to large array of docids in session
    //it is possible to pass this array as a GET request parameter, but that does not scale due to the POST and GET size limits of Tomcat
    String dIdLKey = request.getParameter("dIdLKey");
    if (dIdLKey != null) {
        try {
            Set<String> docIdsLot = (Set<String>) session.getAttribute(dIdLKey);
            Set<String> dIds = new HashSet<String>();
            if (docIds != null)
                for (String docId : docIds)
                    dIds.add(docId);

            if (docIdsLot != null)
                for (String dId : docIdsLot)
                    dIds.add(dId);
            docIds = dIds.toArray(new String[dIds.size()]);
            //System.err.println("Found docIds in the session... read "+docIds.length+" docIds");
        } catch (ClassCastException e) {
            e.printStackTrace();
        }
    }
    String tag = request.getParameter("annotation"); // only one tag supported right now, will revisit if needed

    String[] directions = request.getParameterValues("direction");
    Set<String> directionsSet = new LinkedHashSet<String>();
    if (directions != null)
        for (String d : directions)
            directionsSet.add(d);
    boolean direction_in = directionsSet.contains("in");
    boolean direction_out = directionsSet.contains("out");

    String[] sentiments = request.getParameterValues("sentiment");
    int cluster = HTMLUtils.getIntParam(request, "timeCluster", -1);
    /** usually, there is 1 time cluster per month */

    Set<String> foldersSet = new LinkedHashSet<String>();
    if (folders != null)
        for (String f : folders)
            foldersSet.add(f);

    // a little bit of an asymmetry here, only one groupIdx is considered, can't be multiple
    int groupIdx = HTMLUtils.getIntParam(request, "groupIdx", Integer.MAX_VALUE);
    Archive archive = JSPHelper.getArchive(session);
    AddressBook addressBook = archive.addressBook;
    GroupAssigner groupAssigner = archive.groupAssigner;
    BlobStore attachmentsStore = archive.blobStore;

    Collection<Document> allDocs = getAllDocsAsSet(session, only_apply_to_filtered_docs);
    if (Util.nullOrEmpty(allDocs))
        return new Pair<Collection<Document>, Collection<Blob>>(new ArrayList<Document>(),
                new ArrayList<Blob>());

    //why are there two vars for sentiment and content indexer repns?
    //      Indexer sentiIndexer, indexer;
    //      indexer = sentiIndexer = archive.indexer;

    // the raw request param val is in 8859 encoding, interpret the bytes as utf instead

    /**
     * there is a little overlap between datasetId and docForDocIds.
     * probably datasetIds can be got rid of?
     */
    List<Document> docsForGroup = null, docsForDateRange = null, docsForNumbers = null, docsForFolder = null,
            docsForDirection = null, docsForCluster = null, docsForDocIds = null;
    Collection<Document> docsForTerm = null, docsForPersons = null, docsForSentiments = null, docsForTag = null,
            docsForAttachments = null, docsForAttachmentTypes = null, docsForDoNotTransfer = null,
            docsForTransferWithRestrictions = null, docsForReviewed = null, docsForRegex = null;
    Collection<Blob> blobsForAttachments = null, blobsForAttachmentTypes = null, blobsForTerm = null;

    if (!Util.nullOrEmpty(term)) {
        term = JSPHelper.convertRequestParamToUTF8(term);
        if (isRegexSearch(request)) {
            docsForTerm = new LinkedHashSet<Document>(IndexUtils.selectDocsByRegex(archive, allDocs, term));
            // TODO: regex search in attachments is not implemented yet
        } else {
            Indexer.QueryType qt = null;
            String searchType = request.getParameter("searchType");
            if ("correspondents".equals(searchType))
                qt = Indexer.QueryType.CORRESPONDENTS;
            else if ("subject".equals(searchType))
                qt = Indexer.QueryType.SUBJECT;
            else if ("original".equals(searchType))
                qt = Indexer.QueryType.ORIGINAL;
            else if ("regex".equals(searchType))
                qt = Indexer.QueryType.REGEX;
            else
                qt = Indexer.QueryType.FULL;

            Indexer.QueryOptions options = new Indexer.QueryOptions();
            options.setQueryType(qt);
            options.setSortBy(sortBy);

            docsForTerm = archive.docsForQuery(term, options);
            // also search blobs and merge result, but not for subject/corr. search
            if (!"correspondents".equals(searchType) && !"subject".equals(searchType)) {
                blobsForTerm = archive.blobsForQuery(term);
                Set<Document> blobDocsForTerm = (Set<Document>) EmailUtils
                        .getDocsForAttachments((Collection) allDocs, blobsForTerm);
                log.info("Blob docs for term: " + term + ", " + blobDocsForTerm.size() + ", blobs: "
                        + blobsForTerm.size());
                docsForTerm = Util.setUnion(docsForTerm, blobDocsForTerm);
            }
        }
    }

    if ("true".equals(request.getParameter("sensitive"))) {
        Indexer.QueryType qt = null;
        qt = Indexer.QueryType.PRESET_REGEX;
        docsForRegex = archive.docsForQuery(cluster, qt);
    }

    if (foldersSet.size() > 0) {
        docsForFolder = new ArrayList<Document>();
        for (Document d : allDocs) {
            EmailDocument ed = (EmailDocument) d;
            if (foldersSet.contains(ed.folderName))
                docsForFolder.add(ed);
        }
    }

    if ((direction_in || direction_out) && addressBook != null) {
        docsForDirection = new ArrayList<Document>();
        for (Document d : allDocs) {
            EmailDocument ed = (EmailDocument) d;
            int sent_or_received = ed.sentOrReceived(addressBook);
            if (direction_in)
                if (((sent_or_received & EmailDocument.RECEIVED_MASK) != 0) || sent_or_received == 0) // if sent_or_received == 0 => we neither directly recd. nor sent it (e.g. it could be received on a mailing list). so count it as received.
                    docsForDirection.add(ed);
            if (direction_out && (sent_or_received & EmailDocument.SENT_MASK) != 0)
                docsForDirection.add(ed);
        }
    }

    String doNotTransfer = request.getParameter("doNotTransfer");
    if (!Util.nullOrEmpty(doNotTransfer)) {
        boolean val = "true".equals(doNotTransfer);
        docsForDoNotTransfer = new LinkedHashSet<Document>();
        for (Document d : allDocs) {
            EmailDocument ed = (EmailDocument) d;
            if (ed.doNotTransfer == val)
                docsForDoNotTransfer.add(ed);
        }
    }

    String transferWithRestrictions = request.getParameter("transferWithRestrictions");
    if (!Util.nullOrEmpty(transferWithRestrictions)) {
        boolean val = "true".equals(transferWithRestrictions);
        docsForTransferWithRestrictions = new LinkedHashSet<Document>();
        for (Document d : allDocs) {
            EmailDocument ed = (EmailDocument) d;
            if (ed.transferWithRestrictions == val)
                docsForTransferWithRestrictions.add(ed);
        }
    }

    String reviewed = request.getParameter("reviewed");
    if (!Util.nullOrEmpty(reviewed)) {
        boolean val = "true".equals(reviewed);
        docsForReviewed = new LinkedHashSet<Document>();
        for (Document d : allDocs) {
            EmailDocument ed = (EmailDocument) d;
            if (ed.reviewed == val)
                docsForReviewed.add(ed);
        }
    }

    if (sentiments != null && sentiments.length > 0) {
        Lexicon lex = (Lexicon) getSessionAttribute(session, "lexicon");
        docsForSentiments = lex.getDocsWithSentiments(sentiments, archive.indexer, allDocs, cluster,
                request.getParameter("originalContentOnly") != null, sentiments);
    }

    // if (!Util.nullOrEmpty(tag))
    if (tag != null) // note: explicitly allowing tag=<empty> as a way to specify no tag.
    {
        docsForTag = Document.selectDocByTag(allDocs, tag, true);
    }
    if (cluster >= 0) {
        docsForCluster = new ArrayList<>(archive.docsForQuery(null, cluster, Indexer.QueryType.FULL)); // null for term returns all docs in cluster
    }

    if (persons != null || contact_ids != null) {
        persons = JSPHelper.convertRequestParamsToUTF8(persons);
        docsForPersons = IndexUtils.selectDocsByAllPersons(addressBook, (Collection) allDocs, persons,
                Util.toIntArray(contact_ids));
    }

    //Some docs with faulty date are assigned 1960/01/01
    if (end_yy >= 0 && yy >= 0) // date range
    {
        docsForDateRange = (List) IndexUtils.selectDocsByDateRange((Collection) allDocs, yy, mm, dd, end_yy,
                end_mm, end_dd);
        log.info("Found " + docsForDateRange.size() + " docs in range: [" + yy + "/" + mm + "/" + dd + " - ["
                + end_yy + "/" + end_mm + "/" + end_dd + "]");
    } else if (yy >= 0) // single month or year
    {
        docsForDateRange = IndexUtils.selectDocsByDateRange((Collection) allDocs, yy, mm, dd);
        log.info("Found " + docsForDateRange.size() + " docs beyond " + yy + "/" + mm + "/" + dd);
    }

    if (groupIdx != Integer.MAX_VALUE) {
        if (groupIdx >= groupAssigner.getSelectedGroups().size())
            groupIdx = -1; // must be the "None" group
        docsForGroup = (List) IndexUtils.getDocsForGroupIdx((Collection) allDocs, addressBook, groupAssigner,
                groupIdx);
    }

    if (!Util.nullOrEmpty(attachments)) {
        attachments = JSPHelper.convertRequestParamsToUTF8(attachments);
        blobsForAttachments = IndexUtils.getBlobsForAttachments(allDocs, attachments, attachmentsStore);
        docsForAttachments = (Set<Document>) EmailUtils.getDocsForAttachments((Collection) allDocs,
                blobsForAttachments);
    }

    if (!Util.nullOrEmpty(attachment_extensions)) {
        attachment_extensions = JSPHelper.convertRequestParamsToUTF8(attachment_extensions);
        blobsForAttachmentTypes = IndexUtils.getBlobsForAttachmentTypes(allDocs, attachment_extensions);
        docsForAttachmentTypes = (Set<Document>) EmailUtils.getDocsForAttachments((Collection) allDocs,
                blobsForAttachmentTypes);
    }

    if (!Util.nullOrEmpty(docIds)) {
        docsForDocIds = new ArrayList<>();
        for (String id : docIds) {
            Document d = archive.docForId(id);
            if (d != null)
                docsForDocIds.add(d);
        }
    }

    if (datasetId != null) {
        // note: these docNums have nothing to do with docIds of the docs.
        // they are just indexes into a dataset, which is a collection of docs from the result of some search.
        DataSet dataset = (DataSet) getSessionAttribute(session, datasetId);
        if (dataset != null) {
            String[] docNumbers = request.getParameterValues("docNum");
            if (docNumbers == null)
                docsForNumbers = dataset.getDocs();
            else
                docsForNumbers = (List) IndexUtils.getDocNumbers(dataset.getDocs(), docNumbers);
        }
    }

    // apply the OR or AND of the filters
    boolean initialized = false;
    List<Document> resultDocs;
    List<Blob> resultBlobs;

    // if its an AND selection, and we are applying only to filtered docs, start with it and intersect with the docs for each facet.
    // otherwise, start with nothing as an optimization, since there's no need to intersect with it.
    // the docs for each facet will always be a subset of archive's docs.
    if (only_apply_to_filtered_docs && !or_not_and && allDocs != null) {
        initialized = true;
        resultDocs = new ArrayList<>(allDocs);
    } else
        resultDocs = new ArrayList<>();

    if (docsForTerm != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForTerm);
        } else
            resultDocs.retainAll(docsForTerm);
    }

    if (docsForRegex != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForRegex);
        } else
            resultDocs.retainAll(docsForRegex);
    }

    if (docsForSentiments != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForSentiments);
        } else
            resultDocs = Util.listIntersection(resultDocs, docsForSentiments);
    }
    if (docsForTag != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForTag);
        } else
            resultDocs = Util.listIntersection(resultDocs, docsForTag);
    }

    if (docsForCluster != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForCluster);
        } else
            resultDocs.retainAll(docsForCluster);
    }

    if (docsForDocIds != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForDocIds);
        } else
            resultDocs.retainAll(docsForDocIds);
    }

    if (docsForPersons != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForPersons);
        } else
            resultDocs = Util.listIntersection(resultDocs, docsForPersons);
    }

    if (docsForDateRange != null) {
        // if (!initialized || or_not_and)
        // note: date range selection is always ANDed, regardless of or_not_and
        if (!initialized) {
            initialized = true;
            resultDocs.addAll(docsForDateRange);
        } else
            resultDocs.retainAll(docsForDateRange);
    }
    if (docsForFolder != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForFolder);
        } else
            resultDocs.retainAll(docsForFolder);
    }

    if (docsForDirection != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForDirection);
        } else
            resultDocs.retainAll(docsForDirection);
    }

    if (docsForDoNotTransfer != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForDoNotTransfer);
        } else
            resultDocs.retainAll(docsForDoNotTransfer);
    }

    if (docsForTransferWithRestrictions != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForTransferWithRestrictions);
        } else
            resultDocs.retainAll(docsForTransferWithRestrictions);
    }

    if (docsForReviewed != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForReviewed);
        } else
            resultDocs.retainAll(docsForReviewed);
    }

    if (docsForGroup != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForGroup);
        } else
            resultDocs.retainAll(docsForGroup);
    }

    if (docsForAttachments != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForAttachments);
        } else
            resultDocs.retainAll(docsForAttachments);
    }

    if (docsForAttachmentTypes != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForAttachmentTypes);
        } else
            resultDocs.retainAll(docsForAttachmentTypes);
    }

    if (docsForNumbers != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForNumbers);
        } else
            resultDocs.retainAll(docsForNumbers);
    }

    if (!initialized) {
        if (cluster >= 0)
            resultDocs = new ArrayList<Document>(archive.docsForQuery(null, cluster, Indexer.QueryType.FULL)); // means all docs in cluster x
        else {
            resultDocs = new ArrayList<Document>();
            resultDocs.addAll(allDocs); // if no filter, all docs are selected
        }
    }

    // compute resultBlobs
    if (or_not_and) {
        resultBlobs = Util.listUnion(blobsForAttachments, blobsForAttachmentTypes);
        resultBlobs = Util.listUnion(resultBlobs, blobsForTerm);
    } else {
        resultBlobs = Util.listIntersection(blobsForAttachments, blobsForAttachmentTypes);
        resultBlobs = Util.listIntersection(resultBlobs, blobsForTerm);
    }

    // we need to sort again if needed. by default, we're here assuming relevance based sort.
    // can't rely on indexer sort.
    // for 2 reasons:
    // 1. blobs vs. docs may not be sorted by date as they are retrieved separately from the index.
    // 2. there may be no search term -- the user can use this as a way to list all docs, but may still want sort by time
    if (sortBy == Indexer.SortBy.CHRONOLOGICAL_ORDER)
        Collections.sort(resultDocs);
    else if (sortBy == Indexer.SortBy.RECENT_FIRST) {
        Collections.sort(resultDocs);
        Collections.reverse(resultDocs);
    }

    return new Pair<Collection<Document>, Collection<Blob>>(resultDocs, resultBlobs);
}
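
The long cascade above repeats one small state machine per facet: the first non-null facet seeds the result list, after which OR mode unions with addAll and AND mode intersects with retainAll. A condensed sketch of that combining rule; FacetCombiner is a hypothetical helper, not part of the ePADD API:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

class FacetCombiner {
    /** Combine facet result sets: union for OR, intersection for AND. Null facets are skipped. */
    @SafeVarargs
    static <T> List<T> combine(boolean orNotAnd, Collection<T>... facets) {
        List<T> result = new ArrayList<>();
        boolean initialized = false;
        for (Collection<T> facet : facets) {
            if (facet == null) {
                continue; // facet not requested
            }
            if (!initialized || orNotAnd) {
                initialized = true;
                result.addAll(facet);      // OR: union (the first facet always seeds)
            } else {
                result.retainAll(facet);   // AND: intersect with what we have so far
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<String> byTerm = Arrays.asList("d1", "d2", "d3");
        List<String> byPerson = Arrays.asList("d2", "d3", "d4");
        System.out.println(combine(false, byTerm, null, byPerson)); // AND -> [d2, d3]
        System.out.println(combine(true, byTerm, null, byPerson));  // OR  -> [d1, d2, d3, d2, d3, d4] (addAll unions with duplicates)
    }
}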

From source file:org.opencms.ade.contenteditor.CmsContentService.java

/**
 * Reads the content definition for the given resource and locale.<p>
 *
 * @param file the resource file
 * @param content the XML content
 * @param entityId the entity id
 * @param locale the content locale
 * @param newLocale if the locale content should be created as new
 * @param mainLocale the main language to copy in case the element language node does not exist yet
 * @param editedLocaleEntity the edited locale entity
 *
 * @return the content definition
 *
 * @throws CmsException if something goes wrong
 */
private CmsContentDefinition readContentDefinition(CmsFile file, CmsXmlContent content, String entityId,
        Locale locale, boolean newLocale, Locale mainLocale, CmsEntity editedLocaleEntity) throws CmsException {

    long timer = 0;
    if (LOG.isDebugEnabled()) {
        timer = System.currentTimeMillis();
    }
    CmsObject cms = getCmsObject();
    List<Locale> availableLocalesList = OpenCms.getLocaleManager().getAvailableLocales(cms, file);
    if (!availableLocalesList.contains(locale)) {
        availableLocalesList.retainAll(content.getLocales());
        List<Locale> defaultLocales = OpenCms.getLocaleManager().getDefaultLocales(cms, file);
        Locale replacementLocale = OpenCms.getLocaleManager().getBestMatchingLocale(locale, defaultLocales,
                availableLocalesList);
        LOG.info("Can't edit locale " + locale + " of file " + file.getRootPath()
                + " because it is not configured as available locale. Using locale " + replacementLocale
                + " instead.");
        locale = replacementLocale;
        entityId = CmsContentDefinition.uuidToEntityId(file.getStructureId(), locale.toString());
    }

    if (CmsStringUtil.isEmptyOrWhitespaceOnly(entityId)) {
        entityId = CmsContentDefinition.uuidToEntityId(file.getStructureId(), locale.toString());
    }
    boolean performedAutoCorrection = checkAutoCorrection(cms, content);
    if (performedAutoCorrection) {
        content.initDocument();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(Messages.get().getBundle().key(Messages.LOG_TAKE_UNMARSHALING_TIME_1,
                "" + (System.currentTimeMillis() - timer)));
    }
    CmsContentTypeVisitor visitor = new CmsContentTypeVisitor(cms, file, locale);
    if (LOG.isDebugEnabled()) {
        timer = System.currentTimeMillis();
    }
    visitor.visitTypes(content.getContentDefinition(), getWorkplaceLocale(cms));
    if (LOG.isDebugEnabled()) {
        LOG.debug(Messages.get().getBundle().key(Messages.LOG_TAKE_VISITING_TYPES_TIME_1,
                "" + (System.currentTimeMillis() - timer)));
    }
    CmsEntity entity = null;
    Map<String, String> syncValues = new HashMap<String, String>();
    Collection<String> skipPaths = new HashSet<String>();
    evaluateSyncLocaleValues(content, syncValues, skipPaths);
    if (content.hasLocale(locale) && newLocale) {
        // a new locale is requested, so remove the present one
        content.removeLocale(locale);
    }
    if (!content.hasLocale(locale)) {
        if ((mainLocale != null) && content.hasLocale(mainLocale)) {
            content.copyLocale(mainLocale, locale);
        } else {
            content.addLocale(cms, locale);
        }
        // sync the locale values
        if (!visitor.getLocaleSynchronizations().isEmpty() && (content.getLocales().size() > 1)) {
            for (Locale contentLocale : content.getLocales()) {
                if (!contentLocale.equals(locale)) {
                    content.synchronizeLocaleIndependentValues(cms, skipPaths, contentLocale);
                }
            }
        }
    }
    Element element = content.getLocaleNode(locale);
    if (LOG.isDebugEnabled()) {
        timer = System.currentTimeMillis();
    }
    entity = readEntity(content, element, locale, entityId, "", getTypeUri(content.getContentDefinition()),
            visitor, false, editedLocaleEntity);

    if (LOG.isDebugEnabled()) {
        LOG.debug(Messages.get().getBundle().key(Messages.LOG_TAKE_READING_ENTITY_TIME_1,
                "" + (System.currentTimeMillis() - timer)));
    }
    List<String> contentLocales = new ArrayList<String>();
    for (Locale contentLocale : content.getLocales()) {
        contentLocales.add(contentLocale.toString());
    }
    Locale workplaceLocale = OpenCms.getWorkplaceManager().getWorkplaceLocale(cms);
    TreeMap<String, String> availableLocales = new TreeMap<String, String>();
    for (Locale availableLocale : OpenCms.getLocaleManager().getAvailableLocales(cms, file)) {
        availableLocales.put(availableLocale.toString(), availableLocale.getDisplayName(workplaceLocale));
    }
    String title = cms.readPropertyObject(file, CmsPropertyDefinition.PROPERTY_TITLE, false).getValue();
    try {
        CmsGallerySearchResult searchResult = CmsGallerySearch.searchById(cms, file.getStructureId(), locale);
        title = searchResult.getTitle();
    } catch (CmsException e) {
        LOG.warn(e.getLocalizedMessage(), e);
    }
    String typeName = OpenCms.getResourceManager().getResourceType(file.getTypeId()).getTypeName();
    boolean autoUnlock = OpenCms.getWorkplaceManager().shouldAcaciaUnlock();
    Map<String, CmsEntity> entities = new HashMap<String, CmsEntity>();
    entities.put(entityId, entity);

    return new CmsContentDefinition(entityId, entities, visitor.getAttributeConfigurations(),
            visitor.getWidgetConfigurations(), visitor.getComplexWidgetData(), visitor.getTypes(),
            visitor.getTabInfos(), locale.toString(), contentLocales, availableLocales,
            visitor.getLocaleSynchronizations(), syncValues, skipPaths, title, cms.getSitePath(file), typeName,
            performedAutoCorrection, autoUnlock, getChangeHandlerScopes(content.getContentDefinition()));
}

From source file:org.dllearner.algorithms.qtl.experiments.PRConvergenceExperiment.java

/**
 * Split the SPARQL query and join the result set of each split. This
 * allows for the execution of more complex queries.
 * @param sparqlQuery the SPARQL query to split
 * @return the intersection of the per-split result sets
 */
private List<String> getResultSplitted(String sparqlQuery) {
    Query query = QueryFactory.create(sparqlQuery);
    logger.trace("Getting result set splitted for\n{}", query);

    List<Query> queries = QueryRewriter.split(query);

    List<String> resources = getResult(queries.remove(0).toString());
    queries.stream().map(q -> getResult(q.toString())).forEach(l -> resources.retainAll(l));

    return resources;
}
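
Two hedged caveats about the fold above: retainAll on an ArrayList iterates the receiver but pays the argument's contains() cost per element, so intersecting list against list is O(n*m) per split; and getResult must return a mutable list for resources.retainAll to succeed. A sketch that routes each intersection through a HashSet instead (names are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

class SplitJoin {
    /** Intersect several result lists; each retainAll step is O(n) via the HashSet. */
    static List<String> join(List<List<String>> perQueryResults) {
        List<String> resources = new ArrayList<>(perQueryResults.get(0));
        for (List<String> next : perQueryResults.subList(1, perQueryResults.size())) {
            resources.retainAll(new HashSet<>(next)); // set lookup instead of list scan
        }
        return resources;
    }

    public static void main(String[] args) {
        System.out.println(join(Arrays.asList(
                Arrays.asList("r1", "r2", "r3"),
                Arrays.asList("r2", "r3", "r4"),
                Arrays.asList("r3", "r2")))); // [r2, r3]
    }
}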

From source file:com.comcast.oscar.configurationfile.ConfigurationFileExport.java

/**
 * This method will remove all top-level TLVs that are not defined in the dictionary.
 * Currently supports 1-byte-length TLVs.
 */
private void removeNonDictionaryTopLevelTLV() {

    Boolean localDebug = Boolean.FALSE;

    /* Get TopLevel List*/
    List<Integer> liTopLevelDict = dsqDictionarySQLQueries.getTopLevelTLV();

    List<Integer> liTopLevelCFE = null;

    try {
        liTopLevelCFE = getTlvBuilder().getTopLevelTlvList();
    } catch (TlvException e) {
        e.printStackTrace();
    }

    /*This will create a single instance of each Type */
    liTopLevelCFE = new ArrayList<Integer>(new LinkedHashSet<Integer>(liTopLevelCFE));

    /*Remove types that are not supposed to be there */
    liTopLevelCFE.retainAll(liTopLevelDict);

    if (debug || localDebug) {
        System.out.println("removeNonDictionaryTopLevelTLV() -> DICT: " + liTopLevelDict);
        System.out.println("removeNonDictionaryTopLevelTLV() -> CFE remove DICT: " + liTopLevelCFE);
    }

    /*Create new ByteArray*/
    bTLV = TlvBuilder.fetchTlv(liTopLevelCFE, bTLV);

}
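
Two idioms combine here: new ArrayList<>(new LinkedHashSet<>(list)) de-duplicates while keeping first-seen order, and retainAll then drops any type the dictionary does not define. A compact sketch with made-up TLV type numbers:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;

class TlvFilter {
    public static void main(String[] args) {
        List<Integer> fromFile = Arrays.asList(3, 24, 3, 55, 24, 99);
        List<Integer> dictionary = Arrays.asList(3, 24, 43);

        // De-duplicate, preserving first-seen order.
        List<Integer> types = new ArrayList<>(new LinkedHashSet<>(fromFile));
        // Keep only types the dictionary knows about.
        types.retainAll(dictionary);

        System.out.println(types); // [3, 24]
    }
}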

From source file:com.cloud.deploy.DeploymentPlanningManagerImpl.java

private void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataCenter dc, ExcludeList avoids) {
    boolean isExplicit = false;
    VirtualMachine vm = vmProfile.getVirtualMachine();

    // check if zone is dedicated. if yes check if vm owner has access to it.
    DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(dc.getId());
    if (dedicatedZone != null && !_accountMgr.isRootAdmin(vmProfile.getOwner().getId())) {
        long accountDomainId = vmProfile.getOwner().getDomainId();
        long accountId = vmProfile.getOwner().getAccountId();

        // If a zone is dedicated to an account then all hosts in this zone
        // will be explicitly dedicated to
        // that account. So there won't be any shared hosts in the zone, the
        // only way to deploy vms from that
        // account will be to use explicit dedication affinity group.
        if (dedicatedZone.getAccountId() != null) {
            if (dedicatedZone.getAccountId().equals(accountId)) {
                return;
            } else {
                throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName()
                        + " not available for the user account " + vmProfile.getOwner());
            }
        }

        // if zone is dedicated to a domain. Check owner's access to the
        // domain level dedication group
        if (!_affinityGroupService.isAffinityGroupAvailableInDomain(dedicatedZone.getAffinityGroupId(),
                accountDomainId)) {
            throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName()
                    + " not available for the user domain " + vmProfile.getOwner());
        }

    }

    // check affinity group of type Explicit dedication exists. If No put
    // dedicated pod/cluster/host in avoid list
    List<AffinityGroupVMMapVO> vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(),
            "ExplicitDedication");

    if (vmGroupMappings != null && !vmGroupMappings.isEmpty()) {
        isExplicit = true;
    }

    List<Long> allPodsInDc = _podDao.listAllPods(dc.getId());
    List<Long> allDedicatedPods = _dedicatedDao.listAllPods();
    allPodsInDc.retainAll(allDedicatedPods);

    List<Long> allClustersInDc = _clusterDao.listAllClusters(dc.getId());
    List<Long> allDedicatedClusters = _dedicatedDao.listAllClusters();
    allClustersInDc.retainAll(allDedicatedClusters);

    List<Long> allHostsInDc = _hostDao.listAllHosts(dc.getId());
    List<Long> allDedicatedHosts = _dedicatedDao.listAllHosts();
    allHostsInDc.retainAll(allDedicatedHosts);

    //Only when the VM is a user instance and is not explicitly dedicated.
    if (vm.getType() == VirtualMachine.Type.User && !isExplicit) {
        //add explicitly dedicated resources in avoidList

        avoids.addPodList(allPodsInDc);
        avoids.addClusterList(allClustersInDc);
        avoids.addHostList(allHostsInDc);
    }

    //Handle the Virtual Router Case
    //No need to check isExplicit, as both cases are handled.
    if (vm.getType() == VirtualMachine.Type.DomainRouter) {
        long vmAccountId = vm.getAccountId();
        long vmDomainId = vm.getDomainId();

        //Lists all explicitly dedicated resources from vm account ID or domain ID.
        List<Long> allPodsFromDedicatedID = new ArrayList<Long>();
        List<Long> allClustersFromDedicatedID = new ArrayList<Long>();
        List<Long> allHostsFromDedicatedID = new ArrayList<Long>();

        //Whether the dedicated resources belong to the domain or not. If not, they may belong to the account, or there is no dedication.
        List<AffinityGroupDomainMapVO> domainGroupMappings = _affinityGroupDomainMapDao
                .listByDomain(vmDomainId);

        //For temporary storage and indexing.
        List<DedicatedResourceVO> tempStorage;

        if (domainGroupMappings == null || domainGroupMappings.isEmpty()) {
            //The dedicated resource belongs to VM Account ID.

            tempStorage = _dedicatedDao.searchDedicatedPods(null, vmDomainId, vmAccountId, null,
                    new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first();

            for (DedicatedResourceVO vo : tempStorage) {
                allPodsFromDedicatedID.add(vo.getPodId());
            }

            tempStorage.clear();
            tempStorage = _dedicatedDao.searchDedicatedClusters(null, vmDomainId, vmAccountId, null,
                    new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first();

            for (DedicatedResourceVO vo : tempStorage) {
                allClustersFromDedicatedID.add(vo.getClusterId());
            }

            tempStorage.clear();
            tempStorage = _dedicatedDao.searchDedicatedHosts(null, vmDomainId, vmAccountId, null,
                    new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first();

            for (DedicatedResourceVO vo : tempStorage) {
                allHostsFromDedicatedID.add(vo.getHostId());
            }

            //Remove the dedicated ones from main list
            allPodsInDc.removeAll(allPodsFromDedicatedID);
            allClustersInDc.removeAll(allClustersFromDedicatedID);
            allHostsInDc.removeAll(allHostsFromDedicatedID);
        } else {
            //The dedicated resource belongs to VM Domain ID or No dedication.

            tempStorage = _dedicatedDao.searchDedicatedPods(null, vmDomainId, null, null,
                    new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first();

            for (DedicatedResourceVO vo : tempStorage) {
                allPodsFromDedicatedID.add(vo.getPodId());
            }

            tempStorage.clear();
            tempStorage = _dedicatedDao.searchDedicatedClusters(null, vmDomainId, null, null,
                    new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first();

            for (DedicatedResourceVO vo : tempStorage) {
                allClustersFromDedicatedID.add(vo.getClusterId());
            }

            tempStorage.clear();
            tempStorage = _dedicatedDao.searchDedicatedHosts(null, vmDomainId, null, null,
                    new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first();

            for (DedicatedResourceVO vo : tempStorage) {
                allHostsFromDedicatedID.add(vo.getHostId());
            }

            //Remove the dedicated ones from the main list.
            allPodsInDc.removeAll(allPodsFromDedicatedID);
            allClustersInDc.removeAll(allClustersFromDedicatedID);
            allHostsInDc.removeAll(allHostsFromDedicatedID);
        }

        //Add the remainder to the avoid list (nothing is added if there is no dedication).
        avoids.addPodList(allPodsInDc);
        avoids.addClusterList(allClustersInDc);
        avoids.addHostList(allHostsInDc);
    }
}
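
The avoid-list logic above boils down to an intersect-then-subtract on ID lists: retainAll narrows the zone-wide list to the dedicated resources, and removeAll then drops the ones the VM may still use. A minimal, self-contained sketch of that pattern (the class name and ID values are hypothetical, standing in for the DAO lookups):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class AvoidListSketch {
    public static void main(String[] args) {
        // All host IDs in the zone (hypothetical values).
        List<Long> allHostsInDc = new ArrayList<>(Arrays.asList(1L, 2L, 3L, 4L, 5L));
        // Hosts explicitly dedicated to some account or domain.
        List<Long> allDedicatedHosts = Arrays.asList(2L, 3L, 5L);
        // Hosts dedicated to the VM's own account; the VM may still use these.
        List<Long> hostsForVmAccount = Arrays.asList(3L);

        // Intersection in place: keep only the dedicated hosts.
        allHostsInDc.retainAll(allDedicatedHosts);  // now [2, 3, 5]
        // Subtract the hosts the VM is allowed to use.
        allHostsInDc.removeAll(hostsForVmAccount);  // now [2, 5]

        // What remains goes into the avoid list.
        System.out.println("Avoid list: " + allHostsInDc);  // Avoid list: [2, 5]
    }
}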

From source file:org.apache.ranger.plugin.policyengine.RangerPolicyRepository.java

private List<RangerPolicyEvaluator> getPolicyEvaluators(Map<String, RangerResourceTrie> resourceTrie,
        RangerAccessResource resource) {
    List<RangerPolicyEvaluator> ret = null;
    Set<String> resourceKeys = resource == null ? null : resource.getKeys();

    if (CollectionUtils.isNotEmpty(resourceKeys)) {
        boolean isRetModifiable = false;

        for (String resourceName : resourceKeys) {
            RangerResourceTrie trie = resourceTrie.get(resourceName);

            if (trie == null) { // if no trie exists for this resource level, ignore and continue to next level
                continue;
            }

            List<RangerPolicyEvaluator> resourceEvaluators = trie
                    .getEvaluatorsForResource(resource.getValue(resourceName));

            if (CollectionUtils.isEmpty(resourceEvaluators)) { // no policies for this resource, bail out
                ret = null;
            } else if (ret == null) { // initialize ret with policies found for this resource
                ret = resourceEvaluators;
            } else { // remove policies from ret that are not in resourceEvaluators
                if (isRetModifiable) {
                    ret.retainAll(resourceEvaluators);
                } else {
                    final List<RangerPolicyEvaluator> shorterList;
                    final List<RangerPolicyEvaluator> longerList;

                    if (ret.size() < resourceEvaluators.size()) {
                        shorterList = ret;
                        longerList = resourceEvaluators;
                    } else {
                        shorterList = resourceEvaluators;
                        longerList = ret;
                    }

                    ret = new ArrayList<>(shorterList);
                    ret.retainAll(longerList);
                    isRetModifiable = true;
                }
            }

            if (CollectionUtils.isEmpty(ret)) { // if no policy exists, bail out and return empty list
                ret = null;
                break;
            }
        }
    }

    if (ret == null) {
        ret = Collections.emptyList();
    }

    if (LOG.isDebugEnabled()) {
        // Guard against a null resource in the debug message.
        LOG.debug("<== RangerPolicyRepository.getPolicyEvaluators("
                + (resource == null ? null : resource.getAsString()) + "): evaluatorCount=" + ret.size());
    }

    return ret;
}
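
The branch above highlights a subtlety of retainAll: it mutates its receiver, so a list obtained from elsewhere (here, from the trie) has to be copied before it can be intersected, and copying the shorter of the two lists keeps both the copy and the result as small as possible. A minimal sketch of that copy-before-retainAll idiom (the intersect helper and sample values are hypothetical):

import java.util.ArrayList;
import java.util.List;

public class IntersectSketch {
    // Intersects two lists without mutating either argument.
    static <T> List<T> intersect(List<T> a, List<T> b) {
        List<T> shorter = a.size() < b.size() ? a : b;
        List<T> longer = (shorter == a) ? b : a;

        // retainAll mutates its receiver, so work on a private copy; calling it
        // directly on a list we don't own (or on an unmodifiable one) would
        // corrupt shared state or throw UnsupportedOperationException.
        List<T> result = new ArrayList<>(shorter);
        result.retainAll(longer);
        return result;
    }

    public static void main(String[] args) {
        List<String> a = List.of("read", "write", "execute");
        List<String> b = List.of("write", "execute", "admin");
        System.out.println(intersect(a, b));  // [write, execute]
    }
}

If many such intersections are chained, wrapping the longer side in a HashSet first is worth considering, since retainAll calls contains once per element of its receiver and a list's contains is a linear scan.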

From source file:ubic.gemma.analysis.expression.coexpression.GeneCoexpressionServiceImpl.java

/**
 * Get coexpression results using a pure gene2gene query (without visiting the probe2probe tables). This is
 * generally faster, probably even if we're only interested in data from a subset of the experiments.
 *
 * @param baseSet
 * @param eeIds Experiments to limit the results to (must not be null, and should already be security-filtered)
 * @param queryGenes
 * @param stringency
 * @param maxResults
 * @param queryGenesOnly return links among the query genes only.
 * @return
 */
private CoexpressionMetaValueObject getFilteredCannedAnalysisResults(ExpressionExperimentSet baseSet,
        Collection<Long> eeIds, Collection<Gene> queryGenes, int stringency, int maxResults,
        boolean queryGenesOnly) {

    if (queryGenes.isEmpty()) {
        throw new IllegalArgumentException("No genes in query");
    }

    List<ExpressionExperimentValueObject> eevos = getSortedEEvos(eeIds);

    if (eevos.isEmpty()) {
        throw new IllegalArgumentException("There are no usable experiments in the selected set");
    }

    /*
     * We get this prior to filtering so it matches the vectors stored with the analysis.
     */
    expressionExperimentSetService.thaw(baseSet);
    List<Long> positionToIDMap = Gene2GenePopulationServiceImpl
            .getPositionToIdMap(EntityUtils.getIds(baseSet.getExperiments()));

    /*
     * This set of links must be filtered to include only those from the data sets being analyzed.
     */
    Map<Long, Collection<Gene2GeneCoexpression>> gg2gs = getRawCoexpression(queryGenes, stringency, maxResults,
            queryGenesOnly);

    List<Long> filteredEeIds = (List<Long>) EntityUtils.getIds(eevos);

    CoexpressionMetaValueObject result = initValueObject(queryGenes, eevos, true);

    List<CoexpressionValueObjectExt> ecvos = new ArrayList<CoexpressionValueObjectExt>();

    Collection<Gene2GeneCoexpression> seen = new HashSet<Gene2GeneCoexpression>();

    // queryGenes = geneService.thawLite( gg2gs.keySet() );

    // populate the value objects.
    StopWatch timer = new StopWatch();
    Collection<Gene> allUsedGenes = new HashSet<Gene>();
    for (Gene queryGene : queryGenes) {
        timer.start();

        if (!queryGene.getTaxon().equals(baseSet.getTaxon())) {
            throw new IllegalArgumentException(
                    "Mismatch between taxon for expression experiment set selected and gene queries");
        }

        allUsedGenes.add(queryGene);

        /*
         * For summary statistics
         */
        CountingMap<Long> supportCount = new CountingMap<Long>();
        Collection<Long> allSupportingDatasets = new HashSet<Long>();
        Collection<Long> allDatasetsWithSpecificProbes = new HashSet<Long>();
        Collection<Long> allTestedDataSets = new HashSet<Long>();

        int linksMetPositiveStringency = 0;
        int linksMetNegativeStringency = 0;

        Collection<Gene2GeneCoexpression> g2gs = gg2gs.get(queryGene.getId());

        assert g2gs != null;

        List<Long> relevantEEIdList = getRelevantEEidsForBitVector(positionToIDMap, g2gs);
        relevantEEIdList.retainAll(filteredEeIds);

        GeneValueObject queryGeneValueObject = new GeneValueObject(queryGene);

        HashMap<Gene, Collection<Gene2GeneCoexpression>> foundGenes = new HashMap<Gene, Collection<Gene2GeneCoexpression>>();

        // for queryGene get the interactions
        Map<Long, Gene2GeneProteinAssociation> proteinInteractionMap = this
                .getGene2GeneProteinAssociationForQueryGene(queryGene);

        Map<Long, TfGeneAssociation> regulatedBy = this.getTfGeneAssociationsforTargetGene(queryGene);
        Map<Long, TfGeneAssociation> regulates = this.getTfGeneAssociationsforTf(queryGene);

        if (timer.getTime() > 100) {
            log.info("Postprocess " + queryGene.getOfficialSymbol() + " Phase I: " + timer.getTime() + "ms");
        }
        timer.stop();
        timer.reset();
        timer.start();

        for (Gene2GeneCoexpression g2g : g2gs) {
            StopWatch timer2 = new StopWatch();
            timer2.start();

            Gene foundGene = g2g.getFirstGene().equals(queryGene) ? g2g.getSecondGene() : g2g.getFirstGene();

            allUsedGenes.add(foundGene);

            // FIXME Symptomatic fix for duplicate found genes.
            // Keep track of the found genes so that we can correctly identify
            // duplicates. Also keep the g2g object for debugging purposes.
            if (foundGenes.containsKey(foundGene)) {
                foundGenes.get(foundGene).add(g2g);
                log.warn("Duplicate gene found in coexpression results, skipping: " + foundGene
                        + " From analysis: " + g2g.getSourceAnalysis().getId());
                continue; // Found a duplicate gene: add it only to our
                          // debugging list, not to the results.

            }

            foundGenes.put(foundGene, new ArrayList<Gene2GeneCoexpression>());
            foundGenes.get(foundGene).add(g2g);

            CoexpressionValueObjectExt cvo = new CoexpressionValueObjectExt();

            /*
             * This Thaw is a big time sink and _should not_ be necessary.
             */
            // foundGene = geneService.thawLite( foundGene ); // db hit

            cvo.setQueryGene(queryGeneValueObject);
            cvo.setFoundGene(new GeneValueObject(foundGene));

            if (timer2.getTime() > 10)
                log.info("Coexp. Gene processing phase I:" + timer2.getTime() + "ms");
            timer2.stop();
            timer2.reset();
            timer2.start();

            populateInteractions(proteinInteractionMap, regulatedBy, regulates, foundGene, cvo);

            Collection<Long> testingDatasets = Gene2GenePopulationServiceImpl.getTestedExperimentIds(g2g,
                    positionToIDMap);
            testingDatasets.retainAll(filteredEeIds);

            /*
             * Necessary in case any were filtered out (for example, if this is a virtual analysis, or there were
             * 'troubled' ees). Note that 'supporting' includes 'non-specific' if they were recorded by the analyzer.
             */
            Collection<Long> supportingDatasets = Gene2GenePopulationServiceImpl.getSupportingExperimentIds(g2g,
                    positionToIDMap);

            // necessary in case any were filtered out.
            supportingDatasets.retainAll(filteredEeIds);

            cvo.setSupportingExperiments(supportingDatasets);

            Collection<Long> specificDatasets = Gene2GenePopulationServiceImpl.getSpecificExperimentIds(g2g,
                    positionToIDMap);

            /*
             * The specific-probe EE ids can contain a 1 even for data sets that weren't supporting, so restrict
             * them to the supporting set.
             */
            specificDatasets.retainAll(supportingDatasets);

            int numTestingDatasets = testingDatasets.size();
            int numSupportingDatasets = supportingDatasets.size();

            /*
             * SANITY CHECKS
             */
            assert specificDatasets.size() <= numSupportingDatasets;
            assert numTestingDatasets >= numSupportingDatasets;
            assert numTestingDatasets <= eevos.size();

            cvo.setDatasetVector(
                    getDatasetVector(supportingDatasets, testingDatasets, specificDatasets, relevantEEIdList));

            /*
             * This check is necessary in case any data sets were filtered out (i.e., we're not interested in the
             * full set of data sets that were used in the original analysis).
             */
            if (numSupportingDatasets < stringency) {
                continue;
            }

            allTestedDataSets.addAll(testingDatasets);

            int supportFromSpecificProbes = specificDatasets.size();
            if (g2g.getEffect() < 0) {
                cvo.setPosSupp(0);
                cvo.setNegSupp(numSupportingDatasets);
                if (numSupportingDatasets != supportFromSpecificProbes)
                    cvo.setNonSpecNegSupp(numSupportingDatasets - supportFromSpecificProbes);

                ++linksMetNegativeStringency;
            } else {
                cvo.setPosSupp(numSupportingDatasets);
                if (numSupportingDatasets != supportFromSpecificProbes)
                    cvo.setNonSpecPosSupp(numSupportingDatasets - supportFromSpecificProbes);
                cvo.setNegSupp(0);
                ++linksMetPositiveStringency;
            }
            cvo.setSupportKey(Math.max(cvo.getPosSupp(), cvo.getNegSupp()));
            cvo.setNumTestedIn(numTestingDatasets);

            for (Long id : supportingDatasets) {
                supportCount.increment(id);
            }

            cvo.setSortKey();

            /*
             * This check prevents links from being shown twice when we do "among query genes". We don't skip the
             * link entirely, so the counts for the summary table are still populated correctly.
             */
            if (!seen.contains(g2g)) {
                ecvos.add(cvo);
            }

            seen.add(g2g);

            allSupportingDatasets.addAll(supportingDatasets);
            allDatasetsWithSpecificProbes.addAll(specificDatasets);

        }

        Collection<Long> geneIds = new ArrayList<Long>();
        for (Gene g : allUsedGenes) {
            geneIds.add(g.getId());
        }

        populateNodeDegree(ecvos, geneIds, allTestedDataSets);

        if (timer.getTime() > 1000) {
            log.info("Postprocess " + g2gs.size() + " results for " + queryGene.getOfficialSymbol()
                    + "Phase II: " + timer.getTime() + "ms");
        }
        timer.stop();
        timer.reset();
        timer.start();

        // This is only necessary for debugging purposes. Helps us keep
        // track of duplicate genes found above.
        if (log.isDebugEnabled()) {
            for (Gene foundGene : foundGenes.keySet()) {
                if (foundGenes.get(foundGene).size() > 1) {
                    log.debug("** DUPLICATE: " + foundGene.getOfficialSymbol()
                            + " found multiple times. Gene2Genes objects are: ");
                    for (Gene2GeneCoexpression g1g : foundGenes.get(foundGene)) {
                        log.debug(" ============ Gene2Gene Id: " + g1g.getId() + " 1st gene: "
                                + g1g.getFirstGene().getOfficialSymbol() + " 2nd gene: "
                                + g1g.getSecondGene().getOfficialSymbol() + " Source Analysis: "
                                + g1g.getSourceAnalysis().getId() + " # of dataSets: " + g1g.getNumDataSets());
                    }
                }
            }
        }

        CoexpressionSummaryValueObject summary = makeSummary(eevos, allTestedDataSets,
                allDatasetsWithSpecificProbes, linksMetPositiveStringency, linksMetNegativeStringency);
        result.getSummary().put(queryGene.getOfficialSymbol(), summary);

        generateDatasetSummary(eevos, result, supportCount, allSupportingDatasets, queryGene);

        /*
         * FIXME I'm lazy and rushed, so I'm using an existing field for this info; probably better to add another
         * field to the value object...
         */
        for (ExpressionExperimentValueObject eevo : eevos) {
            eevo.setExternalUri(AnchorTagUtil.getExpressionExperimentUrl(eevo.getId()));
        }

        Collections.sort(ecvos);
        getGoOverlap(ecvos, queryGene);

        timer.stop();
        if (timer.getTime() > 1000) {
            log.info("Postprocess " + g2gs.size() + " results for " + queryGene.getOfficialSymbol()
                    + " PhaseIII: " + timer.getTime() + "ms");
        }
        timer.reset();
    } // Over results.

    result.getKnownGeneResults().addAll(ecvos);
    return result;
}
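
The loop above repeatedly uses retainAll to restrict ID collections recovered from the stored analysis (testing, supporting, and specific data sets) to the experiments the caller actually selected. A compact sketch of that filtering step (the class name and ID values are hypothetical); note that passing a HashSet as the argument keeps each membership test constant-time:

import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

public class DatasetFilterSketch {
    public static void main(String[] args) {
        // Experiments the caller actually selected (hypothetical IDs).
        Set<Long> filteredEeIds = new HashSet<>(Arrays.asList(10L, 11L, 12L));

        // IDs recovered from the stored analysis; these may include
        // experiments that were filtered out (e.g. 'troubled' ones).
        Collection<Long> testingDatasets = new HashSet<>(Arrays.asList(9L, 10L, 11L, 12L, 14L));
        Collection<Long> supportingDatasets = new HashSet<>(Arrays.asList(9L, 10L, 12L));

        // Restrict both collections to the selected experiments.
        testingDatasets.retainAll(filteredEeIds);     // now {10, 11, 12}
        supportingDatasets.retainAll(filteredEeIds);  // now {10, 12}

        // The sanity check from the snippet still holds after filtering.
        assert testingDatasets.size() >= supportingDatasets.size();
        System.out.println("testing=" + testingDatasets + ", supporting=" + supportingDatasets);
    }
}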