Example usage for java.util Set toString

List of usage examples for java.util Set toString

Introduction

On this page you can find example usage of java.util Set toString.

Prototype

public String toString() 

Document

Returns a string representation of the object.

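Before the project examples below, here is a minimal, self-contained sketch (class and variable names are illustrative) of what Set.toString() produces, together with the bracket-stripping idiom several of the examples rely on:

import java.util.LinkedHashSet;
import java.util.Set;

public class SetToStringDemo {
    public static void main(String[] args) {
        // LinkedHashSet keeps insertion order, so the printed output is predictable.
        Set<String> names = new LinkedHashSet<>();
        names.add("alice");
        names.add("bob");

        // Set inherits AbstractCollection.toString(): elements in brackets, comma-separated.
        String raw = names.toString();
        System.out.println(raw); // prints: [alice, bob]

        // Several examples below strip the enclosing brackets to build plain
        // comma-separated lists for error messages or property values.
        String csv = raw.replace("[", "").replace("]", "");
        System.out.println(csv); // prints: alice, bob
    }
}
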
Usage

From source file:com.pactera.edg.am.metamanager.extractor.increment.impl.IncrementAnalysisServiceImpl.java

/**
 * Generates relation codes for the given metadata dependencies,
 * using a cache keyed by the dependency's metamodel signature.
 * 
 * @param dependencies dependencies to generate relation codes for
 * @throws MDependencyNotFoundException if a relation code cannot be generated for some dependency
 */
private void genRelationCode(List<MMDDependency> dependencies) throws MDependencyNotFoundException {
    Map<String, String> relationCodeCache = AdapterExtractorContext.getInstance().getRelationCodeCache();

    Set<String> errorCodes = new HashSet<String>();
    for (MMDDependency dependency : dependencies) {
        // build a cache key for this dependency
        // int hashCode = dependency.hashCode();
        String relationMetaModel = new StringBuilder(dependency.getOwnerMetadata().getClassifierId())
                .append("_").append(dependency.getOwnerRole()).append("_")
                .append(dependency.getValueMetadata().getClassifierId()).append("_")
                .append(dependency.getValueRole()).toString();

        if (relationCodeCache.containsKey(relationMetaModel)) {
            // cache hit: reuse the previously generated code
            String code = relationCodeCache.get(relationMetaModel);
            if (code != null) {
                dependency.setCode(code);
            }
        } else {
            String code = metaModelDao.genRelationCode(dependency);
            if (code == null) {
                errorCodes.add(new StringBuilder("owner classifier: ")
                        .append(dependency.getOwnerMetadata().getClassifierId()).append(", owner role: ")
                        .append(dependency.getOwnerRole()).append(", value classifier: ")
                        .append(dependency.getValueMetadata().getClassifierId()).append(", value role: ")
                        .append(dependency.getValueRole()).toString());
            } else {
                dependency.setCode(code);
                // cache the newly generated code
                relationCodeCache.put(relationMetaModel, dependency.getCode());
            }
        }
    }
    if (errorCodes.size() > 0) {
        String logMsg = "Failed to generate relation codes for the following dependencies:\n" + errorCodes.toString();
        log.error(logMsg);
        AdapterExtractorContext.addExtractorLog(ExtractorLogLevel.ERROR, logMsg);

        throw new MDependencyNotFoundException(errorCodes.toString());
    }

}

From source file:org.kuali.ole.module.purap.document.validation.impl.InvoicePurchaseOrderIdValidation.java

public boolean validate(AttributedDocumentEvent event) {
    boolean valid = true;
    boolean lineItemtypeIndicator = false;
    OleInvoiceDocument document = (OleInvoiceDocument) event.getDocument();
    GlobalVariables.getMessageMap().clearErrorPath();
    GlobalVariables.getMessageMap().addToErrorPath(OLEPropertyConstants.DOCUMENT);
    Integer POID = document.getPurchaseOrderIdentifier();
    if (document.getItems().size() > 0) {
        Set closedVendorIds = new TreeSet();
        for (OleInvoiceItem invoiceItem : (List<OleInvoiceItem>) document.getItems()) {
            if (invoiceItem.getItemType().isLineItemIndicator()) {
                lineItemtypeIndicator = true;
            }
            //PurchaseOrderDocument purchaseOrderDocument =document.getPurchaseOrderDocument(invoiceItem.getPurchaseOrderIdentifier());
            PurchaseOrderDocument purchaseOrderDocument = null;
            Map map = new HashMap();
            map.put("purapDocumentIdentifier", invoiceItem.getPurchaseOrderIdentifier());
            List<OlePurchaseOrderDocument> purchaseOrderDocumentList = (List<OlePurchaseOrderDocument>) KRADServiceLocator
                    .getBusinessObjectService().findMatching(OlePurchaseOrderDocument.class, map);
            if (purchaseOrderDocumentList != null && purchaseOrderDocumentList.size() > 0) {
                for (OlePurchaseOrderDocument poDoc : purchaseOrderDocumentList) {
                    if (poDoc.getPurchaseOrderCurrentIndicatorForSearching()) {
                        purchaseOrderDocument = poDoc;
                    }
                }
            }
            if (purchaseOrderDocument != null && purchaseOrderDocument.getDocumentHeader() == null) {
                purchaseOrderDocument.setDocumentHeader(SpringContext.getBean(DocumentHeaderService.class)
                        .getDocumentHeaderById(purchaseOrderDocument.getDocumentNumber()));

            }
            //OlePurchaseOrderDocument purchaseOrderDocument = (OlePurchaseOrderDocument) purchaseOrderDocument1;
            if (purchaseOrderDocument != null && purchaseOrderDocument.isPendingActionIndicator()) {
                GlobalVariables.getMessageMap().putError(PurapPropertyConstants.PURCHASE_ORDER_IDENTIFIER,
                        PurapKeyConstants.ERROR_PURCHASE_PENDING_ACTION);
                valid &= false;
            } else if (purchaseOrderDocument != null
                    && !StringUtils.equals(purchaseOrderDocument.getApplicationDocumentStatus(),
                            PurapConstants.PurchaseOrderStatuses.APPDOC_OPEN)) {
                closedVendorIds.add(purchaseOrderDocument.getPurapDocumentIdentifier());
                // if the PO is pending and it is not a Retransmit, we cannot generate an Invoice for it
            }
        }
        if (closedVendorIds.size() > 0) {
            GlobalVariables.getMessageMap().putError(PurapPropertyConstants.PURCHASE_ORDER_IDENTIFIER,
                    PurapKeyConstants.ERROR_POS_NOT_OPEN,
                    closedVendorIds.toString().replace("[", "").replace("]", ""));
            valid &= false;
        }
    } else {
        GlobalVariables.getMessageMap().putError(PurapPropertyConstants.PURCHASE_ORDER_IDENTIFIER,
                PurapKeyConstants.ERROR_PURCHASE_ORDER_NOT_EXIST);
        valid &= false;
    }

    //for (OlePurchaseOrderDocument purchaseOrderDocument : document.getPurchaseOrderDocuments()) {
    //PurchaseOrderDocument purchaseOrderDocument = document.getPurchaseOrderDocument();
    /*if (!lineItemtypeIndicator) {
        GlobalVariables.getMessageMap().putError(PurapPropertyConstants.PURCHASE_ORDER_IDENTIFIER, PurapKeyConstants.ERROR_PURCHASE_ORDER_NOT_EXIST);
        valid &= false;
    }*/
    return valid;
}

From source file:org.apache.accumulo.examples.wikisearch.logic.AbstractQueryLogic.java

public Results runQuery(Connector connector, List<String> authorizations, String query, Date beginDate,
        Date endDate, Set<String> types) {

    if (StringUtils.isEmpty(query)) {
        throw new IllegalArgumentException(
                "NULL QueryNode reference passed to " + this.getClass().getSimpleName());
    }

    Set<Range> ranges = new HashSet<Range>();
    Set<String> typeFilter = types;
    String array[] = authorizations.toArray(new String[0]);
    Authorizations auths = new Authorizations(array);
    Results results = new Results();

    // Get the query string
    String queryString = query;

    StopWatch abstractQueryLogic = new StopWatch();
    StopWatch optimizedQuery = new StopWatch();
    StopWatch queryGlobalIndex = new StopWatch();
    StopWatch optimizedEventQuery = new StopWatch();
    StopWatch fullScanQuery = new StopWatch();
    StopWatch processResults = new StopWatch();

    abstractQueryLogic.start();

    StopWatch parseQuery = new StopWatch();
    parseQuery.start();

    QueryParser parser;
    try {
        if (log.isDebugEnabled()) {
            log.debug("ShardQueryLogic calling QueryParser.execute");
        }
        parser = new QueryParser();
        parser.execute(queryString);
    } catch (org.apache.commons.jexl2.parser.ParseException e1) {
        throw new IllegalArgumentException("Error parsing query", e1);
    }
    int hash = parser.getHashValue();
    parseQuery.stop();
    if (log.isDebugEnabled()) {
        log.debug(hash + " Query: " + queryString);
    }

    Set<String> fields = new HashSet<String>();
    for (String f : parser.getQueryIdentifiers()) {
        fields.add(f);
    }
    if (log.isDebugEnabled()) {
        log.debug("getQueryIdentifiers: " + parser.getQueryIdentifiers().toString());
    }
    // Remove any negated fields from the fields list, we don't want to lookup negated fields
    // in the index.
    fields.removeAll(parser.getNegatedTermsForOptimizer());

    if (log.isDebugEnabled()) {
        log.debug("getQueryIdentifiers: " + parser.getQueryIdentifiers().toString());
    }
    // Get the mapping of field name to QueryTerm object from the query. The query term object
    // contains the operator, whether its negated or not, and the literal to test against.
    Multimap<String, QueryTerm> terms = parser.getQueryTerms();

    // Find out which terms are indexed
    // TODO: Should we cache indexed terms or does that not make sense since we are always
    // loading data.
    StopWatch queryMetadata = new StopWatch();
    queryMetadata.start();
    Map<String, Multimap<String, Class<? extends Normalizer>>> metadataResults;
    try {
        metadataResults = findIndexedTerms(connector, auths, fields, typeFilter);
    } catch (Exception e1) {
        throw new RuntimeException("Error in metadata lookup", e1);
    }

    // Create a map of indexed term to set of normalizers for it
    Multimap<String, Normalizer> indexedTerms = HashMultimap.create();
    for (Entry<String, Multimap<String, Class<? extends Normalizer>>> entry : metadataResults.entrySet()) {
        // Get the normalizer from the normalizer cache
        for (Class<? extends Normalizer> clazz : entry.getValue().values()) {
            indexedTerms.put(entry.getKey(), normalizerCacheMap.get(clazz));
        }
    }
    queryMetadata.stop();
    if (log.isDebugEnabled()) {
        log.debug(hash + " Indexed Terms: " + indexedTerms.toString());
    }

    Set<String> orTerms = parser.getOrTermsForOptimizer();

    // Iterate over the query terms to get the operators specified in the query.
    ArrayList<String> unevaluatedExpressions = new ArrayList<String>();
    boolean unsupportedOperatorSpecified = false;
    for (Entry<String, QueryTerm> entry : terms.entries()) {
        if (null == entry.getValue()) {
            continue;
        }

        if (null != this.unevaluatedFields && this.unevaluatedFields.contains(entry.getKey().trim())) {
            unevaluatedExpressions.add(entry.getKey().trim() + " " + entry.getValue().getOperator() + " "
                    + entry.getValue().getValue());
        }

        int operator = JexlOperatorConstants.getJJTNodeType(entry.getValue().getOperator());
        if (!(operator == ParserTreeConstants.JJTEQNODE || operator == ParserTreeConstants.JJTNENODE
                || operator == ParserTreeConstants.JJTLENODE || operator == ParserTreeConstants.JJTLTNODE
                || operator == ParserTreeConstants.JJTGENODE || operator == ParserTreeConstants.JJTGTNODE
                || operator == ParserTreeConstants.JJTERNODE)) {
            unsupportedOperatorSpecified = true;
            break;
        }
    }
    if (null != unevaluatedExpressions)
        unevaluatedExpressions.trimToSize();
    if (log.isDebugEnabled()) {
        log.debug(hash + " unsupportedOperators: " + unsupportedOperatorSpecified + " indexedTerms: "
                + indexedTerms.toString() + " orTerms: " + orTerms.toString() + " unevaluatedExpressions: "
                + unevaluatedExpressions.toString());
    }

    // We can use the intersecting iterator over the field index as an optimization under the
    // following conditions
    //
    // 1. No unsupported operators in the query.
    // 2. No 'or' operators and at least one term indexed
    // or
    // 1. No unsupported operators in the query.
    // 2. and all terms indexed
    // or
    // 1. All or'd terms are indexed. NOTE, this will potentially skip some queries and push to a full table scan
    // // WE should look into finding a better way to handle whether we do an optimized query or not.
    boolean optimizationSucceeded = false;
    boolean orsAllIndexed = false;
    if (orTerms.isEmpty()) {
        orsAllIndexed = false;
    } else {
        orsAllIndexed = indexedTerms.keySet().containsAll(orTerms);
    }

    if (log.isDebugEnabled()) {
        log.debug("All or terms are indexed");
    }

    if (!unsupportedOperatorSpecified && (((null == orTerms || orTerms.isEmpty()) && indexedTerms.size() > 0)
            || (fields.size() > 0 && indexedTerms.size() == fields.size()) || orsAllIndexed)) {
        optimizedQuery.start();
        // Set up intersecting iterator over field index.

        // Get information from the global index for the indexed terms. The results object will contain the term
        // mapped to an object that contains the total count, and partitions where this term is located.

        // TODO: Should we cache indexed term information or does that not make sense since we are always loading data
        queryGlobalIndex.start();
        IndexRanges termIndexInfo;
        try {
            // If fields is null or zero, then it's probably the case that the user entered a value
            // to search for with no fields. Check for the value in index.
            if (fields.isEmpty()) {
                termIndexInfo = this.getTermIndexInformation(connector, auths, queryString, typeFilter);
                if (null != termIndexInfo && termIndexInfo.getRanges().isEmpty()) {
                    // Then we didn't find anything in the index for this query. This may happen for an indexed term that has wildcards
                    // in unhandled locations.
                    // Break out of here by throwing a named exception and do full scan
                    throw new DoNotPerformOptimizedQueryException();
                }
                // We need to rewrite the query string here so that it's valid.
                if (termIndexInfo instanceof UnionIndexRanges) {
                    UnionIndexRanges union = (UnionIndexRanges) termIndexInfo;
                    StringBuilder buf = new StringBuilder();
                    String sep = "";
                    for (String fieldName : union.getFieldNamesAndValues().keySet()) {
                        buf.append(sep).append(fieldName).append(" == ");
                        if (!(queryString.startsWith("'") && queryString.endsWith("'"))) {
                            buf.append("'").append(queryString).append("'");
                        } else {
                            buf.append(queryString);
                        }
                        sep = " or ";
                    }
                    if (log.isDebugEnabled()) {
                        log.debug("Rewrote query for non-fielded single term query: " + queryString + " to "
                                + buf.toString());
                    }
                    queryString = buf.toString();
                } else {
                    throw new RuntimeException("Unexpected IndexRanges implementation");
                }
            } else {
                RangeCalculator calc = this.getTermIndexInformation(connector, auths, indexedTerms, terms,
                        this.getIndexTableName(), this.getReverseIndexTableName(), queryString,
                        this.queryThreads, typeFilter);
                if (null == calc.getResult() || calc.getResult().isEmpty()) {
                    // Then we didn't find anything in the index for this query. This may happen for an indexed term that has wildcards
                    // in unhandled locations.
                    // Break out of here by throwing a named exception and do full scan
                    throw new DoNotPerformOptimizedQueryException();
                }
                termIndexInfo = new UnionIndexRanges();
                termIndexInfo.setIndexValuesToOriginalValues(calc.getIndexValues());
                termIndexInfo.setFieldNamesAndValues(calc.getIndexEntries());
                termIndexInfo.getTermCardinality().putAll(calc.getTermCardinalities());
                for (Range r : calc.getResult()) {
                    // foo is a placeholder and is ignored.
                    termIndexInfo.add("foo", r);
                }
            }
        } catch (TableNotFoundException e) {
            log.error(this.getIndexTableName() + " not found", e);
            throw new RuntimeException(this.getIndexTableName() + " not found", e);
        } catch (org.apache.commons.jexl2.parser.ParseException e) {
            throw new RuntimeException("Error determining ranges for query: " + queryString, e);
        } catch (DoNotPerformOptimizedQueryException e) {
            log.info("Indexed fields not found in index, performing full scan");
            termIndexInfo = null;
        }
        queryGlobalIndex.stop();

        // Determine if we should proceed with optimized query based on results from the global index
        boolean proceed = false;
        if (null == termIndexInfo || termIndexInfo.getFieldNamesAndValues().values().size() == 0) {
            proceed = false;
        } else if (null != orTerms && orTerms.size() > 0
                && (termIndexInfo.getFieldNamesAndValues().values().size() == indexedTerms.size())) {
            proceed = true;
        } else if (termIndexInfo.getFieldNamesAndValues().values().size() > 0) {
            proceed = true;
        } else if (orsAllIndexed) {
            proceed = true;
        } else {
            proceed = false;
        }
        if (log.isDebugEnabled()) {
            log.debug("Proceed with optimized query: " + proceed);
            if (null != termIndexInfo)
                log.debug("termIndexInfo.getTermsFound().size(): "
                        + termIndexInfo.getFieldNamesAndValues().values().size() + " indexedTerms.size: "
                        + indexedTerms.size() + " fields.size: " + fields.size());
        }
        if (proceed) {

            if (log.isDebugEnabled()) {
                log.debug(hash + " Performing optimized query");
            }
            // Use the scan ranges from the GlobalIndexRanges object as the ranges for the batch scanner
            ranges = termIndexInfo.getRanges();
            if (log.isDebugEnabled()) {
                log.info(hash + " Ranges: count: " + ranges.size() + ", " + ranges.toString());
            }

            // Create BatchScanner, set the ranges, and setup the iterators.
            optimizedEventQuery.start();
            BatchScanner bs = null;
            try {
                bs = connector.createBatchScanner(this.getTableName(), auths, queryThreads);
                bs.setRanges(ranges);
                IteratorSetting si = new IteratorSetting(21, "eval", OptimizedQueryIterator.class);

                if (log.isDebugEnabled()) {
                    log.debug("Setting scan option: " + EvaluatingIterator.QUERY_OPTION + " to " + queryString);
                }
                // Set the query option
                si.addOption(EvaluatingIterator.QUERY_OPTION, queryString);
                // Set the Indexed Terms List option. This is the field name and normalized field value pair separated
                // by a comma.
                StringBuilder buf = new StringBuilder();
                String sep = "";
                for (Entry<String, String> entry : termIndexInfo.getFieldNamesAndValues().entries()) {
                    buf.append(sep);
                    buf.append(entry.getKey());
                    buf.append(":");
                    buf.append(termIndexInfo.getIndexValuesToOriginalValues().get(entry.getValue()));
                    buf.append(":");
                    buf.append(entry.getValue());
                    if (sep.equals("")) {
                        sep = ";";
                    }
                }
                if (log.isDebugEnabled()) {
                    log.debug("Setting scan option: " + FieldIndexQueryReWriter.INDEXED_TERMS_LIST + " to "
                            + buf.toString());
                }
                FieldIndexQueryReWriter rewriter = new FieldIndexQueryReWriter();
                String q = "";
                try {
                    q = queryString;
                    q = rewriter.applyCaseSensitivity(q, true, false);// Set upper/lower case for fieldname/fieldvalue
                    Map<String, String> opts = new HashMap<String, String>();
                    opts.put(FieldIndexQueryReWriter.INDEXED_TERMS_LIST, buf.toString());
                    q = rewriter.removeNonIndexedTermsAndInvalidRanges(q, opts);
                    q = rewriter.applyNormalizedTerms(q, opts);
                    if (log.isDebugEnabled()) {
                        log.debug("runServerQuery, FieldIndex Query: " + q);
                    }
                } catch (org.apache.commons.jexl2.parser.ParseException ex) {
                    log.error("Could not parse query, Jexl ParseException: " + ex);
                } catch (Exception ex) {
                    log.error("Problem rewriting query, Exception: " + ex.getMessage());
                }
                si.addOption(BooleanLogicIterator.FIELD_INDEX_QUERY, q);

                // Set the term cardinality option
                sep = "";
                buf.delete(0, buf.length());
                for (Entry<String, Long> entry : termIndexInfo.getTermCardinality().entrySet()) {
                    buf.append(sep);
                    buf.append(entry.getKey());
                    buf.append(":");
                    buf.append(entry.getValue());
                    sep = ",";
                }
                if (log.isDebugEnabled())
                    log.debug("Setting scan option: " + BooleanLogicIterator.TERM_CARDINALITIES + " to "
                            + buf.toString());
                si.addOption(BooleanLogicIterator.TERM_CARDINALITIES, buf.toString());
                if (this.useReadAheadIterator) {
                    if (log.isDebugEnabled()) {
                        log.debug("Enabling read ahead iterator with queue size: " + this.readAheadQueueSize
                                + " and timeout: " + this.readAheadTimeOut);
                    }
                    si.addOption(ReadAheadIterator.QUEUE_SIZE, this.readAheadQueueSize);
                    si.addOption(ReadAheadIterator.TIMEOUT, this.readAheadTimeOut);

                }

                if (null != unevaluatedExpressions) {
                    StringBuilder unevaluatedExpressionList = new StringBuilder();
                    String sep2 = "";
                    for (String exp : unevaluatedExpressions) {
                        unevaluatedExpressionList.append(sep2).append(exp);
                        sep2 = ",";
                    }
                    if (log.isDebugEnabled())
                        log.debug("Setting scan option: " + EvaluatingIterator.UNEVALUTED_EXPRESSIONS + " to "
                                + unevaluatedExpressionList.toString());
                    si.addOption(EvaluatingIterator.UNEVALUTED_EXPRESSIONS,
                            unevaluatedExpressionList.toString());
                }

                bs.addScanIterator(si);

                processResults.start();
                processResults.suspend();
                long count = 0;
                for (Entry<Key, Value> entry : bs) {
                    count++;
                    // The key that is returned by the EvaluatingIterator is not the same key that is in
                    // the table. The value that is returned by the EvaluatingIterator is a kryo
                    // serialized EventFields object.
                    processResults.resume();
                    Document d = this.createDocument(entry.getKey(), entry.getValue());
                    results.getResults().add(d);
                    processResults.suspend();
                }
                log.info(count + " matching entries found in optimized query.");
                optimizationSucceeded = true;
                processResults.stop();
            } catch (TableNotFoundException e) {
                log.error(this.getTableName() + " not found", e);
                throw new RuntimeException(this.getIndexTableName() + " not found", e);
            } finally {
                if (bs != null) {
                    bs.close();
                }
            }
            optimizedEventQuery.stop();
        }
        optimizedQuery.stop();
    }

    // WE should look into finding a better way to handle whether we do an optimized query or not.
    // We are not setting up an else condition here because we may have aborted the logic early in the if statement.
    if (!optimizationSucceeded || ((null != orTerms && orTerms.size() > 0)
            && (indexedTerms.size() != fields.size()) && !orsAllIndexed)) {
        // if (!optimizationSucceeded || ((null != orTerms && orTerms.size() > 0) && (indexedTerms.size() != fields.size()))) {
        fullScanQuery.start();
        if (log.isDebugEnabled()) {
            log.debug(hash + " Performing full scan query");
        }

        // Set up a full scan using the date ranges from the query
        // Create BatchScanner, set the ranges, and setup the iterators.
        BatchScanner bs = null;
        try {
            // The ranges are the start and end dates
            Collection<Range> r = getFullScanRange(beginDate, endDate, terms);
            ranges.addAll(r);

            if (log.isDebugEnabled()) {
                log.debug(hash + " Ranges: count: " + ranges.size() + ", " + ranges.toString());
            }

            bs = connector.createBatchScanner(this.getTableName(), auths, queryThreads);
            bs.setRanges(ranges);
            IteratorSetting si = new IteratorSetting(22, "eval", EvaluatingIterator.class);
            // Create datatype regex if needed
            if (null != typeFilter) {
                StringBuilder buf = new StringBuilder();
                String s = "";
                for (String type : typeFilter) {
                    buf.append(s).append(type).append(".*");
                    s = "|";
                }
                if (log.isDebugEnabled())
                    log.debug("Setting colf regex iterator to: " + buf.toString());
                IteratorSetting ri = new IteratorSetting(21, "typeFilter", RegExFilter.class);
                RegExFilter.setRegexs(ri, null, buf.toString(), null, null, false);
                bs.addScanIterator(ri);
            }
            if (log.isDebugEnabled()) {
                log.debug("Setting scan option: " + EvaluatingIterator.QUERY_OPTION + " to " + queryString);
            }
            si.addOption(EvaluatingIterator.QUERY_OPTION, queryString);
            if (null != unevaluatedExpressions) {
                StringBuilder unevaluatedExpressionList = new StringBuilder();
                String sep2 = "";
                for (String exp : unevaluatedExpressions) {
                    unevaluatedExpressionList.append(sep2).append(exp);
                    sep2 = ",";
                }
                if (log.isDebugEnabled())
                    log.debug("Setting scan option: " + EvaluatingIterator.UNEVALUTED_EXPRESSIONS + " to "
                            + unevaluatedExpressionList.toString());
                si.addOption(EvaluatingIterator.UNEVALUTED_EXPRESSIONS, unevaluatedExpressionList.toString());
            }
            bs.addScanIterator(si);
            long count = 0;
            processResults.start();
            processResults.suspend();
            for (Entry<Key, Value> entry : bs) {
                count++;
                // The key that is returned by the EvaluatingIterator is not the same key that is in
                // the partition table. The value that is returned by the EvaluatingIterator is a kryo
                // serialized EventFields object.
                processResults.resume();
                Document d = this.createDocument(entry.getKey(), entry.getValue());
                results.getResults().add(d);
                processResults.suspend();
            }
            processResults.stop();
            log.info(count + " matching entries found in full scan query.");
        } catch (TableNotFoundException e) {
            log.error(this.getTableName() + " not found", e);
        } finally {
            if (bs != null) {
                bs.close();
            }
        }
        fullScanQuery.stop();
    }

    log.info("AbstractQueryLogic: " + queryString + " " + timeString(abstractQueryLogic.getTime()));
    log.info("  1) parse query " + timeString(parseQuery.getTime()));
    log.info("  2) query metadata " + timeString(queryMetadata.getTime()));
    log.info("  3) full scan query " + timeString(fullScanQuery.getTime()));
    log.info("  3) optimized query " + timeString(optimizedQuery.getTime()));
    log.info("  1) process results " + timeString(processResults.getTime()));
    log.info("      1) query global index " + timeString(queryGlobalIndex.getTime()));
    log.info(hash + " Query completed.");

    return results;
}

From source file:com.vmware.bdd.plugin.clouderamgr.service.CmClusterValidator.java

public boolean validateBlueprint(ClusterBlueprint blueprint) {
    logger.info("Start to validate blueprint for cluster " + blueprint.getName());

    String distro = blueprint.getHadoopStack().getDistro();
    String distroVersion = CmUtils.distroVersionOfHadoopStack(blueprint.getHadoopStack());

    try {
        List<String> unRecogConfigTypes = new ArrayList<String>();
        List<String> unRecogConfigKeys = new ArrayList<>();
        validateConfigs(blueprint.getConfiguration(), unRecogConfigTypes, unRecogConfigKeys, distroVersion);

        Set<String> availableRoles = AvailableServiceRoleContainer.allRoles(distroVersion);

        Set<String> definedServices = new HashSet<String>();
        Map<String, Integer> definedRoles = new HashMap<String, Integer>();

        List<String> unRecogRoles = null;
        Set<String> invalidRacks = null;

        if (blueprint.getNodeGroups() == null || blueprint.getNodeGroups().isEmpty()) {
            return false;
        }

        int nnGroupsNum = 0;
        for (NodeGroupInfo group : blueprint.getNodeGroups()) {
            validateConfigs(group.getConfiguration(), unRecogConfigTypes, unRecogConfigKeys, distroVersion);
            if (group.getRoles().contains("HDFS_NAMENODE")) {
                nnGroupsNum++;
            }

            for (String roleName : group.getRoles()) {
                if (!availableRoles.contains(roleName)) {
                    if (unRecogRoles == null) {
                        unRecogRoles = new ArrayList<String>();
                    }
                    unRecogRoles.add(roleName);
                } else {
                    if (!definedRoles.containsKey(roleName)) {
                        definedRoles.put(roleName, group.getInstanceNum());
                    } else {
                        Integer instanceNum = definedRoles.get(roleName) + group.getInstanceNum();
                        definedRoles.put(roleName, instanceNum);
                    }
                    definedServices
                            .add(AvailableServiceRoleContainer.load(roleName).getParent().getDisplayName());
                }
            }
        }

        if (nnGroupsNum > 1) {
            errorMsgList.add("Namenode federation is not supported currently");
        }

        if (unRecogRoles != null && !unRecogRoles.isEmpty()) { // point 1: unrecognized roles
            errorMsgList.add("Roles " + unRecogRoles.toString() + " are not available by distro " + distro);
        }

        if (!unRecogConfigTypes.isEmpty()) { // point 2: add to warning list as will be ignored by creating logic
            warningMsgList.add("Configurations for " + unRecogConfigTypes.toString()
                    + " are not available by distro " + distro);
        }

        if (!unRecogConfigKeys.isEmpty()) { // point 3
            errorMsgList.add("Configuration items " + unRecogConfigKeys.toString() + " are invalid");
        }

        if (invalidRacks != null && !invalidRacks.isEmpty()) {
            errorMsgList.add("Racks " + invalidRacks.toString() + " are invalid,"
                    + " rack names must be slash-separated, like Unix paths. For example, \"/rack1\" and \"/cabinet3/rack4\"");
        }

        for (String serviceName : definedServices) {
            // service dependency check
            for (AvailableServiceRole.Dependency dependency : AvailableServiceRoleContainer.load(serviceName)
                    .getDependencies()) {
                if (!dependency.isRequired()) {
                    continue;
                }
                if (dependency.getServices().size() == 1
                        && !definedServices.contains(dependency.getServices().get(0))) {
                    if (serviceName.equals("YARN") && isComputeOnly(definedServices)) {
                        continue;
                    }
                    warningMsgList
                            .add(serviceName + " depends on " + dependency.getServices().get(0) + " service");
                } else {
                    boolean found = false;
                    for (String dependService : dependency.getServices()) {
                        if (definedServices.contains(dependService)) {
                            found = true;
                        }
                    }
                    if (!found) {
                        warningMsgList.add(serviceName + " depends on one service of "
                                + dependency.getServices().toString());
                    }
                }
            }

            Set<String> requiredRoles = new HashSet<String>();
            switch (serviceName) {
            case "HDFS":
                if (!isComputeOnly(definedServices)) {
                    requiredRoles.add("HDFS_NAMENODE");
                    requiredRoles.add("HDFS_DATANODE");
                }
                if (checkRequiredRoles(serviceName, requiredRoles, definedRoles.keySet(), errorMsgList)) {
                    if (nnGroupsNum == 1) {
                        if (definedRoles.get("HDFS_NAMENODE") < 2
                                && !definedRoles.containsKey("HDFS_SECONDARY_NAMENODE")) {
                            errorMsgList.add(
                                    "HDFS service not configured for High Availability must have a SecondaryNameNode");
                        }
                        if (definedRoles.get("HDFS_NAMENODE") >= 2
                                && !definedRoles.containsKey("HDFS_JOURNALNODE")) {
                            errorMsgList.add(
                                    "HDFS service configured for High Availability must have journal nodes");
                        }
                    }
                }
                if (definedRoles.containsKey("HDFS_JOURNALNODE")) {
                    if (definedRoles.get("HDFS_JOURNALNODE") > 1 && definedRoles.get("HDFS_JOURNALNODE") < 3) {
                        errorMsgList.add(Constants.WRONG_NUM_OF_JOURNALNODE);
                    } else if (definedRoles.get("HDFS_JOURNALNODE") % 2 == 0) {
                        warningMsgList.add(Constants.ODD_NUM_OF_JOURNALNODE);
                    }
                }
                break;
            case "YARN":
                requiredRoles.add("YARN_RESOURCE_MANAGER");
                requiredRoles.add("YARN_NODE_MANAGER");
                requiredRoles.add("YARN_JOB_HISTORY");
                if (checkRequiredRoles(serviceName, requiredRoles, definedRoles.keySet(), errorMsgList)) {
                    if (definedRoles.get("YARN_RESOURCE_MANAGER") > 1) {
                        errorMsgList.add(Constants.WRONG_NUM_OF_RESOURCEMANAGER);
                    }
                }
                break;
            case "MAPREDUCE":
                requiredRoles.add("MAPREDUCE_JOBTRACKER");
                requiredRoles.add("MAPREDUCE_TASKTRACKER");
                if (checkRequiredRoles(serviceName, requiredRoles, definedRoles.keySet(), errorMsgList)) {
                    if (definedRoles.get("MAPREDUCE_JOBTRACKER") > 1) {
                        errorMsgList.add(Constants.WRONG_NUM_OF_JOBTRACKER);
                    }
                }
                break;
            case "HBASE":
                requiredRoles.add("HBASE_MASTER");
                requiredRoles.add("HBASE_REGION_SERVER");
                checkRequiredRoles(serviceName, requiredRoles, definedRoles.keySet(), errorMsgList);
                break;
            case "ZOOKEEPER":
                requiredRoles.add("ZOOKEEPER_SERVER");
                if (checkRequiredRoles(serviceName, requiredRoles, definedRoles.keySet(), errorMsgList)) {
                    if (definedRoles.get("ZOOKEEPER_SERVER") > 0 && definedRoles.get("ZOOKEEPER_SERVER") < 3) {
                        errorMsgList.add(Constants.WRONG_NUM_OF_ZOOKEEPER);
                    } else if (definedRoles.get("ZOOKEEPER_SERVER") % 2 == 0) {
                        warningMsgList.add(Constants.ODD_NUM_OF_ZOOKEEPER);
                    }
                }
                break;
            case "HIVE":
                requiredRoles.add("HIVE_METASTORE");
                requiredRoles.add("HIVE_SERVER2");
                checkRequiredRoles(serviceName, requiredRoles, definedRoles.keySet(), errorMsgList);
                String[] requiredConfigs = { "hive_metastore_database_host", "hive_metastore_database_name",
                        "hive_metastore_database_password", "hive_metastore_database_port",
                        "hive_metastore_database_type", "hive_metastore_database_user" };

                boolean configured = true;
                if (blueprint.getConfiguration().containsKey("HIVE")) {
                    Map<String, String> configuredItems = (Map<String, String>) blueprint.getConfiguration()
                            .get("HIVE");
                    for (String item : requiredConfigs) {
                        if (!configuredItems.containsKey(item)) {
                            configured = false;
                            break;
                        }
                    }
                } else {
                    configured = false;
                }

                if (!configured) {
                    errorMsgList.add(
                            "HIVE service depends on an external database, please setup one and provide configuration properties ["
                                    + StringUtils.join(requiredConfigs, ",") + "] for HIVE service");
                }

                break;
            case "OOZIE":
                if (definedRoles.get("OOZIE_SERVER") > 1) {
                    errorMsgList.add("only one OOZIE_SERVER is allowed for OOZIE service");
                }
                break;
            case "SENTRY":
                if (definedRoles.get("SENTRY_SERVER") > 1) {
                    errorMsgList.add("only one SENTRY_SERVER is allowed for SENTRY service");
                }
                break;
            case "SQOOP":
                if (definedRoles.get("SQOOP_SERVER") > 1) {
                    errorMsgList.add("only one SQOOP_SERVER is allowed for SQOOP service");
                }
                break;
            case "ISILON":
                requiredRoles.add("YARN_RESOURCE_MANAGER");
                requiredRoles.add("YARN_JOB_HISTORY");
                requiredRoles.add("YARN_NODE_MANAGER");
                requiredRoles.add("GATEWAY");
                break;
            default:
                break;
            }

        }

    } catch (IOException e) {
        // IO exception ignored
    }

    if (!warningMsgList.isEmpty() || !errorMsgList.isEmpty()) {
        throw ValidationException.VALIDATION_FAIL("Blueprint", errorMsgList, warningMsgList);
    }

    return true;
}

From source file:org.apache.geode.internal.cache.wan.WANTestBase.java

public static void createLocator(int dsId, int port, Set<String> localLocatorsList,
        Set<String> remoteLocatorsList) {
    WANTestBase test = new WANTestBase();
    Properties props = test.getDistributedSystemProperties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(DISTRIBUTED_SYSTEM_ID, "" + dsId);
    StringBuffer localLocatorBuffer = new StringBuffer(localLocatorsList.toString());
    localLocatorBuffer.deleteCharAt(0);
    localLocatorBuffer.deleteCharAt(localLocatorBuffer.lastIndexOf("]"));
    String localLocator = localLocatorBuffer.toString();
    localLocator = localLocator.replace(" ", "");

    props.setProperty(LOCATORS, localLocator);
    props.setProperty(START_LOCATOR,
            "localhost[" + port + "],server=true,peer=true,hostname-for-clients=localhost");
    StringBuffer remoteLocatorBuffer = new StringBuffer(remoteLocatorsList.toString());
    remoteLocatorBuffer.deleteCharAt(0);
    remoteLocatorBuffer.deleteCharAt(remoteLocatorBuffer.lastIndexOf("]"));
    String remoteLocator = remoteLocatorBuffer.toString();
    remoteLocator = remoteLocator.replace(" ", "");
    props.setProperty(REMOTE_LOCATORS, remoteLocator);
    test.getSystem(props);
}

From source file:com.ah.ui.actions.monitor.MapNodeAction.java

private String getActiveSsidText(String macAddress) {
    List<?> list_ssid = QueryUtil.executeQuery("select ssidName from " + AhLatestXif.class.getSimpleName(),
            null, new FilterParams("apMac", macAddress));
    Set<String> activeSsids = new HashSet<String>();
    String str_ssids;

    for (Object obj : list_ssid) {
        String ssid = (String) obj;
        if (null != ssid && !("".equals(ssid.trim())) && !("N/A".equals(ssid.trim()))) {
            activeSsids.add(ssid);
        }
    }

    if (activeSsids.size() == 0) {
        str_ssids = "N/A";
    } else {
        str_ssids = activeSsids.toString();
    }
    return str_ssids;
}

From source file:com.dell.asm.asmcore.asmmanager.util.DeploymentValidator.java

private void checkDuplicateVMNames(DeploymentValid deploymentValid,
        Map<String, ServiceTemplateComponent> componentMap,
        Map<ServiceTemplateComponentType, Set<String>> componentTypeMap, Deployment deployment) {
    Set<String> duplicates = new HashSet<>();
    Set<String> currentVMNames = new HashSet<>();
    Set<String> componentIds = componentTypeMap.get(ServiceTemplateComponentType.VIRTUALMACHINE);
    if (componentIds != null && componentIds.size() > 0) {
        for (String componentId : componentIds) {
            ServiceTemplateComponent component = componentMap.get(componentId);
            if (component != null) {
                ServiceTemplateSetting vmNameSetting = component.getParameter(
                        ServiceTemplateSettingIDs.SERVICE_TEMPLATE_VM_RESOURCE,
                        ServiceTemplateSettingIDs.SERVICE_TEMPLATE_VM_NAME);
                if (vmNameSetting != null && StringUtils.isNotBlank(vmNameSetting.getValue())) {
                    // keep track of duplicates
                    if (!currentVMNames.add(vmNameSetting.getValue())) {
                        duplicates.add(vmNameSetting.getValue());
                    }
                }
                vmNameSetting = component.getParameter(
                        ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_OS_RESOURCE,
                        ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_OS_HOSTNAME_ID);
                if (vmNameSetting != null && StringUtils.isNotBlank(vmNameSetting.getValue())) {
                    // keep track of duplicates
                    if (!currentVMNames.add(vmNameSetting.getValue())) {
                        duplicates.add(vmNameSetting.getValue());
                    }
                }
            }
        }
    }

    List<DeploymentNamesRefEntity> entities = getDeploymentNamesRefDAO()
            .getAllDeploymentNamesRefsByType(DeploymentNamesType.VM_NAME);
    if (entities != null && entities.size() > 0) {
        String thisDeploymentId = deployment.getId();
        for (DeploymentNamesRefEntity entity : entities) {
            if (entity.getDeploymentId() != null && !entity.getDeploymentId().equals(thisDeploymentId)) {
                if (currentVMNames.contains(entity.getName())) {
                    duplicates.add(entity.getName());
                }
            }
        }
    }
    if (!duplicates.isEmpty()) {
        logger.error("Duplicate vm names found for Deployment " + deployment.getDeploymentName() + ". Found "
                + duplicates.size() + " number of duplicate names.");
        deploymentValid.setValid(false);
        deploymentValid.addMessage(AsmManagerMessages.duplicateVMNameDeployed(duplicates.toString()));
    }

}

From source file:org.apache.geode.management.internal.beans.QueryDataFunction.java

public static Object queryData(final String query, final String members, final int limit,
        final boolean zipResult, final int queryResultSetLimit, final int queryCollectionsDepth)
        throws Exception {

    if (query == null || query.isEmpty()) {
        return new JsonisedErrorMessage(ManagementStrings.QUERY__MSG__QUERY_EMPTY.toLocalizedString())
                .toString();
    }

    Set<DistributedMember> inputMembers = null;
    if (StringUtils.isNotBlank(members)) {
        inputMembers = new HashSet<>();
        StringTokenizer st = new StringTokenizer(members, ",");
        while (st.hasMoreTokens()) {
            String member = st.nextToken();
            DistributedMember distributedMember = BeanUtilFuncs.getDistributedMemberByNameOrId(member);
            inputMembers.add(distributedMember);
            if (distributedMember == null) {
                return new JsonisedErrorMessage(
                        ManagementStrings.QUERY__MSG__INVALID_MEMBER.toLocalizedString(member)).toString();
            }
        }
    }

    InternalCache cache = (InternalCache) CacheFactory.getAnyInstance();
    try {

        SystemManagementService service = (SystemManagementService) ManagementService
                .getExistingManagementService(cache);
        Set<String> regionsInQuery = compileQuery(cache, query);

        // Validate region existence
        if (regionsInQuery.size() > 0) {
            for (String regionPath : regionsInQuery) {
                DistributedRegionMXBean regionMBean = service.getDistributedRegionMXBean(regionPath);
                if (regionMBean == null) {
                    return new JsonisedErrorMessage(
                            ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND.toLocalizedString(regionPath))
                                    .toString();
                } else {
                    Set<DistributedMember> associatedMembers = DataCommandsUtils
                            .getRegionAssociatedMembers(regionPath, cache, true);

                    if (inputMembers != null && inputMembers.size() > 0) {
                        if (!associatedMembers.containsAll(inputMembers)) {
                            return new JsonisedErrorMessage(
                                    ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND_ON_MEMBERS
                                            .toLocalizedString(regionPath)).toString();
                        }
                    }
                }
            }
        } else {
            return new JsonisedErrorMessage(ManagementStrings.QUERY__MSG__INVALID_QUERY
                    .toLocalizedString("Region mentioned in query probably missing /")).toString();
        }

        // Validate
        if (regionsInQuery.size() > 1 && inputMembers == null) {
            for (String regionPath : regionsInQuery) {
                DistributedRegionMXBean regionMBean = service.getDistributedRegionMXBean(regionPath);

                if (regionMBean.getRegionType().equals(DataPolicy.PARTITION.toString())
                        || regionMBean.getRegionType().equals(DataPolicy.PERSISTENT_PARTITION.toString())) {
                    return new JsonisedErrorMessage(
                            ManagementStrings.QUERY__MSG__JOIN_OP_EX.toLocalizedString()).toString();
                }
            }
        }

        String randomRegion = regionsInQuery.iterator().next();

        Set<DistributedMember> associatedMembers = DataCommandsUtils
                .getQueryRegionsAssociatedMembers(regionsInQuery, cache, false);// First
        // available
        // member

        if (associatedMembers != null && associatedMembers.size() > 0) {
            Object[] functionArgs = new Object[6];
            if (inputMembers != null && inputMembers.size() > 0) {// on input
                // members

                functionArgs[DISPLAY_MEMBERWISE] = true;
                functionArgs[QUERY] = query;
                functionArgs[REGION] = randomRegion;
                functionArgs[LIMIT] = limit;
                functionArgs[QUERY_RESULTSET_LIMIT] = queryResultSetLimit;
                functionArgs[QUERY_COLLECTIONS_DEPTH] = queryCollectionsDepth;
                return callFunction(functionArgs, inputMembers, zipResult);
            } else { // Query on any random member
                functionArgs[DISPLAY_MEMBERWISE] = false;
                functionArgs[QUERY] = query;
                functionArgs[REGION] = randomRegion;
                functionArgs[LIMIT] = limit;
                functionArgs[QUERY_RESULTSET_LIMIT] = queryResultSetLimit;
                functionArgs[QUERY_COLLECTIONS_DEPTH] = queryCollectionsDepth;
                return callFunction(functionArgs, associatedMembers, zipResult);
            }

        } else {
            return new JsonisedErrorMessage(ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND
                    .toLocalizedString(regionsInQuery.toString())).toString();
        }

    } catch (QueryInvalidException qe) {
        return new JsonisedErrorMessage(
                ManagementStrings.QUERY__MSG__INVALID_QUERY.toLocalizedString(qe.getMessage())).toString();
    }
}

From source file:com.chinamobile.bcbsp.bspcontroller.JobInProgress.java

/**
 * Strategy for finding slow staffs (note: there must be at least 5 staffs!)
 * This strategy can be changed.
 * @param runStaffs
 *        running staffs on the workermanager
 * @return the IDs of the slow staffs
 */
private Set<Integer> getSlowStaffIDs(StaffMigrate[] runStaffs) {
    int ID = 0;// Feng added for test
    StaffMigrate tmp;
    // Bubble sort
    for (int i = 0; i < runStaffs.length - 1; i++) {
        boolean flag = true;
        for (int j = 1; j < runStaffs.length - i; j++) {
            if (runStaffs[j - 1].getStaffRunTime() > runStaffs[j].getStaffRunTime()) {
                flag = false;
                tmp = runStaffs[j - 1];
                runStaffs[j - 1] = runStaffs[j];
                runStaffs[j] = tmp;
            }
        }
        if (flag) {
            break;
        }
    }
    long totalTime = 0;
    long avgTime;
    if (runStaffs.length > 2) {
        for (int i = 1; i < runStaffs.length - 1; i++) {
            totalTime += runStaffs[i].getStaffRunTime();
        }
        avgTime = totalTime / (runStaffs.length - 2);
    } else {
        for (int i = 0; i < runStaffs.length; i++) {
            totalTime += runStaffs[i].getStaffRunTime();
        }
        avgTime = totalTime / (runStaffs.length);
    }
    int quarterIndex = runStaffs.length / 4;
    int threeQuarterIndex = runStaffs.length * 3 / 4;
    long IRQ = runStaffs[threeQuarterIndex].getStaffRunTime() - runStaffs[quarterIndex].getStaffRunTime();
    LOG.info("IRQ is " + IRQ);
    long threshold = (long) (IRQ * 1.5 + runStaffs[threeQuarterIndex].getStaffRunTime());
    LOG.info("staff runtime threshold is " + threshold);
    Set<Integer> setIDs = new HashSet<Integer>();
    for (int i = threeQuarterIndex; i < runStaffs.length; i++) {
        if (runStaffs[i].getStaffRunTime() > threshold) {
            LOG.info("Slow staff" + runStaffs[i].getStaffID() + " is first selected!");
            // Second select(using cost model)
            if ((job.getNumSuperStep() - runStaffs[i].getCurrentSuperStep())
                    * (runStaffs[i].getStaffRunTime() - avgTime) > runStaffs[i].getMigrateCost()) {
                LOG.info("Slow staff" + runStaffs[i].getStaffID() + " is second selected!");
                /*
                 * Review suggestion: allow user to determine the times for finding
                 * the slow staff Zhicheng Liu 2013/10/9
                 */
                // Third select(predict worker's burden)
                if (shouldMigrate(runStaffs[i])) {
                    int id = runStaffs[i].getStaffID();
                    LOG.info("Slow staff" + id + " is third selected!");
                    // Fourth select(continuous[3] testing)
                    if (this.staffSlowCount[id] != this.conf.getInt("bcbsp.loadbalance.findslowstaff.maxturn",
                            3) - 1) {
                        this.staffSlowCount[id] += 1;
                    } else {
                        this.staffSlowCount[id] = 0;
                        setIDs.add(id);
                    }
                }
            }
        }
    }
    ID = runStaffs[ID].getStaffID();
    if (runStaffs[ID].currSuperStep == 6) {
        setIDs.add(ID);// Feng added for test
        LOG.info("Feng test getSlowStaffID! " + setIDs.toString());
    }
    return setIDs;
}

From source file:io.seldon.recommendation.baseline.MostPopularInSessionRecommender.java

/**
 * Note this recommender does not respect any dimensions passed in
 */
@Override
public ItemRecommendationResultSet recommend(String client, Long user, Set<Integer> dimensions,
        int maxRecsCount, RecommendationContext ctxt, List<Long> recentItemInteractions) {
    RecommendationContext.OptionsHolder options = ctxt.getOptsHolder();

    DimPopularityStore store = itemsManager.getClientStore(client, options);
    if (store == null) {
        if (logger.isDebugEnabled())
            logger.debug("Failed to find popular session data for client " + client);
        return new ItemRecommendationResultSet(
                Collections.<ItemRecommendationResultSet.ItemRecommendationResult>emptyList(), name);
    }

    String attrs = options.getStringOption(ATTRS_PROPERTY_NAME);
    int maxDepth = options.getIntegerOption(DEPTH_PROPERTY_NAME);
    ConsumerBean c = new ConsumerBean(client);
    String[] attrNames = attrs.split(",");
    Set<Long> exclusions = Collections.emptySet();
    if (ctxt.getMode() == RecommendationContext.MODE.EXCLUSION) {
        exclusions = ctxt.getContextItems();
    }
    if (logger.isDebugEnabled()) {
        logger.debug("user " + user + " recentItems:" + recentItemInteractions.toString() + " depth:" + maxDepth
                + " attrs " + attrs);
    }
    Map<Long, Double> scores = new HashMap<>();
    for (int depth = 0; depth < maxDepth; depth++) {
        if (recentItemInteractions.size() <= depth)
            break;
        long recentItem = recentItemInteractions.get(depth);
        Map<String, Integer> attrDims = itemService.getDimensionIdsForItem(c, recentItem);
        double lowestScore = 1.0;
        if (logger.isDebugEnabled())
            logger.debug("Looking at item " + recentItem + " has attrDim size " + attrDims.size());
        for (String attr : attrNames) {
            Integer dim = attrDims.get(attr);
            if (dim != null) {
                List<ItemCount> counts = store.getTopItemsForDimension(dim);
                if (counts != null) {
                    double maxCount = 0;
                    double lowScore = 1.0;
                    for (ItemCount ic : counts) {
                        if (!exclusions.contains(ic.item)) {
                            Map<String, Integer> attrDimsCandidate = itemService.getDimensionIdsForItem(c,
                                    ic.item);
                            if (CollectionUtils.containsAny(dimensions, attrDimsCandidate.values())
                                    || dimensions.contains(Constants.DEFAULT_DIMENSION)) {
                                if (logger.isDebugEnabled())
                                    logger.debug("Adding item " + ic.item + " from dimension " + attr);
                                if (maxCount == 0)
                                    maxCount = ic.count;
                                double normCount = (ic.count / maxCount) * lowestScore; //scale to be a score lower than previous values if any
                                if (scores.containsKey(ic.item))
                                    scores.put(ic.item, scores.get(ic.item) + normCount);
                                else
                                    scores.put(ic.item, normCount);
                                lowScore = normCount;
                                if (scores.size() >= maxRecsCount)
                                    break;
                            } else {
                                if (logger.isDebugEnabled())
                                    logger.debug("Ignoring prospective item " + ic.item
                                            + " as not in dimensions " + dimensions.toString());
                            }
                        } else {
                            if (logger.isDebugEnabled())
                                logger.debug("Excluding item " + ic.item);
                        }
                    }
                    lowestScore = lowScore;//update lowest from this loop
                } else {
                    if (logger.isDebugEnabled())
                        logger.debug("No counts for dimension " + dim + " attribute name " + attr);
                }
            } else {
                logger.warn("Failed to find attr " + attr + " for item " + recentItem);
            }
            if (scores.size() >= maxRecsCount)
                break;
        }

    }
    Map<Long, Double> scaledScores = RecommendationUtils.rescaleScoresToOne(scores, maxRecsCount);
    List<ItemRecommendationResultSet.ItemRecommendationResult> results = new ArrayList<>();
    for (Map.Entry<Long, Double> e : scaledScores.entrySet()) {
        results.add(new ItemRecommendationResultSet.ItemRecommendationResult(e.getKey(),
                e.getValue().floatValue()));
    }
    if (logger.isDebugEnabled())
        logger.debug("Returning " + results.size() + " recommendations");
    return new ItemRecommendationResultSet(results, name);
}