Example usage for java.util Map toString

List of usage examples for java.util Map toString

Introduction

On this page you can find example usage for java.util.Map.toString().

Prototype

public String toString() 

Document

Returns a string representation of the object.
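
Before the real-world examples, here is a minimal, self-contained sketch of what Map.toString() produces. The class name, map contents, and the choice of LinkedHashMap are illustrative assumptions; the standard implementations (HashMap, LinkedHashMap, TreeMap) all inherit the same "{key=value, key=value}" rendering from AbstractMap.

import java.util.LinkedHashMap;
import java.util.Map;

public class MapToStringDemo {
    public static void main(String[] args) {
        // LinkedHashMap preserves insertion order, so the printed string is predictable.
        Map<String, Integer> ages = new LinkedHashMap<>();
        ages.put("alice", 30);
        ages.put("bob", 25);

        // Entries are rendered as key=value, comma separated, enclosed in braces:
        // {alice=30, bob=25}
        System.out.println(ages.toString());
    }
}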

Usage

From source file:org.metis.sql.SqlStmnt.java

/**
 * Called by the Controller bean (RDB or PDB) to execute this SQL statement
 * with the given params.
 * 
 * @param params
 * @throws SQLException
 */
public SqlResult execute(List<Map<String, String>> params) {

    if (params == null) {
        params = new ArrayList<Map<String, String>>();
    }

    LOG.debug("execute: executing this statement: " + getOriginal());
    LOG.debug("execute: ... with this number of param maps  = " + params.size());

    // first, do some light validation work
    if (params.size() == 0 && (isPrepared() || isCallable())) {
        // if it is callable and it requires an IN param
        if (isCallable() && getInTokens().size() > 0) {
            LOG.error("execute: ERROR, IN params were not provided "
                    + "for this callable statement that requires IN params: " + getPrepared());
            return null;
        }
        // all prepared statements that are not callable require an
        // input param
        else {
            LOG.error("execute: ERROR, params were not provided " + "for this prepared statement: "
                    + getPrepared());
            return null;

        }
    } else if (params.size() > 0 && !isPrepared()) {
        LOG.error("execute: ERROR, params were provided "
                + "for this static or non-prepared statement that does not " + "require params: "
                + getOriginal());
        return null;
    }

    // make sure given params match
    if (params.size() > 0) {
        for (Map<String, String> pMap : params) {
            if (!isMatch(pMap.keySet())) {
                LOG.error("execute: ERROR, given key:value set does not match "
                        + "this statement's key:value set\n" + getKeyTokens().toString() + "  vs.  "
                        + params.toString());
                return null;
            }
        }
    }

    // if trace is on, dump params if any
    if (params.size() > 0 && LOG.isTraceEnabled()) {
        for (Map<String, String> pMap : params) {
            LOG.trace("execute: valid param set = " + pMap.toString());
        }
    }

    // A list that essentially represents the result set returned by the
    // DB for queries.
    List<Map<String, Object>> listOfMaps = new ArrayList<Map<String, Object>>();

    // dequeue a sqlResult object from the SqlResult cache
    SqlResult sqlResult = SqlResult.dequeue();

    try {
        // if this statement is call'able, then execute its stored procedure
        // object. Note that we don't support batching calls to stored
        // procedures and functions. Maybe that can be a future
        // enhancement...
        if (isCallable()) {
            LOG.debug("execute: invoking this stored procedure or function: " + getStoredProcName());
            Map<String, Object> kvMap = new HashMap<String, Object>();
            // first prepare the IN params (if any)
            if (params.size() > 0) {
                for (KeyValueObject kvObj : getPreparedObjects(params.get(0))) {
                    kvMap.put(kvObj.getKey(), kvObj.getObj());
                }
            }
            // now execute the function or stored proc
            // Note from Spring docs: The execute() method returns a
            // map with an entry for each declared output parameter,
            // using the parameter name as the key.
            kvMap = getStoredProcedure().execute(kvMap);
            // now that the execute has completed, fetch the OUT params
            // from the kvMap. i suppose it is possible for a stored proc
            // not to have any OUT params.

            // need to transfer each key:value that is associated with
            // the OUT param as a map to listOfMaps. However, those
            // keys that pertain to cursors or sets, point
            // to a List of Maps!!
            for (SqlToken sqlToken : getSortedKeyTokens()) {
                // skip IN only params; we're only looking for OUT params
                if (sqlToken.isIn()) {
                    continue;
                }
                Object outObj = kvMap.remove(sqlToken.getKey());
                if (outObj == null) {
                    LOG.error("execute: object was not returned for this " + "out param: " + sqlToken.getKey());
                    continue;
                }
                if (sqlToken.isCursor() || sqlToken.isRset()) {
                    if (outObj instanceof List) {
                        List<Map<String, Object>> mList = (List<Map<String, Object>>) outObj;
                        for (Map<String, Object> map : mList) {
                            listOfMaps.add(map);
                        }
                    } else {
                        LOG.error("execute: this OUT result set param did not return a type of List: "
                                + sqlToken.getKey());
                        LOG.error("execute: got this type/class instead: " + outObj.getClass().getName());
                    }
                } else {
                    Map<String, Object> map = new HashMap<String, Object>();
                    map.put(sqlToken.getKey(), outObj);
                    listOfMaps.add(map);
                }
            }
            /*
             * Any undeclared results returned are added to the output map
             * with generated names like "#result-set-1" "#result-set-2"
             * etc. You can change this by setting 'skipUndeclaredResults'
             * to true, and then these undeclared resultsets will be
             * skipped. TODO: look into the update count
             */
            if (!kvMap.isEmpty()) {
                LOG.debug("execute: looking for result sets");
                for (Object kvObj : kvMap.values()) {
                    if (kvObj instanceof List) {
                        for (Map<String, Object> map : (List<Map<String, Object>>) kvObj) {
                            listOfMaps.add(map);
                        }
                    } else {
                        LOG.debug(
                                "execute: unknown object returned from execute: " + kvObj.getClass().getName());
                        LOG.debug("execute: unknown object's toString value: " + kvObj.toString());
                    }
                }
            }
            sqlResult.setResultSet(listOfMaps);
            return sqlResult;

        } // if (isCallable()...

        // key:value type objects used for binding the input params to
        // prepared statements
        List<KeyValueObject> kvObjs = null;
        Object bindObjs[] = null;

        // is this a query; i.e., select statement?
        if (getSqlStmntType() == SqlStmntType.SELECT) {
            if (isPrepared()) {
                LOG.debug("execute: executing this prepared SELECT statement: " + getPrepared());
                kvObjs = getPreparedObjects(params.get(0));
                bindObjs = new Object[kvObjs.size()];
                for (int i = 0; i < bindObjs.length; i++) {
                    bindObjs[i] = kvObjs.get(i).getObj();
                }
                listOfMaps = getJdbcTemplate().query(getPrepared(), bindObjs, this);
            } else {
                LOG.trace("execute: executing this SELECT statement: " + getOriginal());
                listOfMaps = getJdbcTemplate().query(getOriginal(), this);
            }
            if (listOfMaps != null && listOfMaps.size() > 0) {
                LOG.trace("execute: dumping first map - " + listOfMaps.get(0).toString());
            }
            sqlResult.setResultSet(listOfMaps);
            return sqlResult;
        }

        // ok, this statement is neither call'able nor a query, so it
        // must be an update of some kind; i.e., insert, update or
        // delete

        // note that keyHolders are only used for INSERT statements!

        if (!isPrepared()) {
            PreparedStmntCreator creatorSetter = new PreparedStmntCreator(this, bindObjs);
            // i guess it is possible to have a non prepared update of some
            // sort
            if (getSqlStmntType() == SqlStmntType.INSERT) {
                GeneratedKeyHolder keyHolder = new GeneratedKeyHolder();
                sqlResult.setNumRows(getJdbcTemplate().update(creatorSetter, keyHolder));
                sqlResult.setKeyHolder(keyHolder);
            } else {
                sqlResult.setNumRows(getJdbcTemplate().update(getOriginal(), creatorSetter));
            }
        }

        // we have a prepared update; is the client requesting a batch
        // update?
        else if (params.size() > 1) {
            LOG.debug("execute: invoking batch update for this statement: " + getPrepared());
            // create the list of objects for the batch update
            List<Object[]> batchArgs = new ArrayList<Object[]>();
            for (Map<String, String> map : params) {
                // prepare the bind objects for the prepared
                // statement
                kvObjs = getPreparedObjects(map);
                bindObjs = new Object[kvObjs.size()];
                for (int i = 0; i < bindObjs.length; i++) {
                    bindObjs[i] = kvObjs.get(i).getObj();
                }
                batchArgs.add(bindObjs);
            }
            sqlResult.setBatchNumRows(getJdbcTemplate().batchUpdate(getPrepared(), batchArgs));
            // note that a key holder is not possible with a batch
            // update
        }

        // we have a prepared update, but it is not a batch update
        else if (params.size() == 1) {

            LOG.debug("execute: invoking prepared update for this statement: " + getPrepared());
            kvObjs = getPreparedObjects(params.get(0));
            bindObjs = new Object[kvObjs.size()];
            for (int i = 0; i < bindObjs.length; i++) {
                bindObjs[i] = kvObjs.get(i).getObj();
            }
            // note that PreparedStmntCreator is both a creator and setter
            PreparedStmntCreator creatorSetter = new PreparedStmntCreator(this, bindObjs);

            if (getSqlStmntType() == SqlStmntType.INSERT) {
                LOG.trace("execute: executing prepared INSERT statement");
                GeneratedKeyHolder keyHolder = new GeneratedKeyHolder();
                int numRows = getJdbcTemplate().update(creatorSetter, keyHolder);
                sqlResult.setNumRows(numRows);
                sqlResult.setKeyHolder(keyHolder);
            } else {
                LOG.trace("execute: executing UPDATE statement");
                int numRows = getJdbcTemplate().update(getPrepared(), creatorSetter);
                sqlResult.setNumRows(numRows);
            }
        }

    } catch (IllegalArgumentException exc) {
        LOG.error("execute: ERROR, caught this " + "IllegalArgumentException while executing sql: "
                + exc.toString());
        LOG.error("execute: exception stack trace follows:");
        dumpStackTrace(exc.getStackTrace());
        if (exc.getCause() != null) {
            LOG.error("execute: Caused by " + exc.getCause().toString());
            LOG.error("execute: causing exception stack trace follows:");
            dumpStackTrace(exc.getCause().getStackTrace());
        }
        if (sqlResult != null) {
            SqlResult.enqueue(sqlResult);
        }
        sqlResult = null;
    } catch (DataAccessException exc) {
        LOG.error("execute:ERROR, caught this " + "DataAccessException while executing sql: " + exc.toString());
        LOG.error("execute: exception stack trace follows:");
        dumpStackTrace(exc.getStackTrace());
        LOG.error("execute: Most Specific Cause = " + exc.getMostSpecificCause().toString());
        LOG.error("execute: MSC exception stack trace follows:");
        dumpStackTrace(exc.getMostSpecificCause().getStackTrace());
        if (sqlResult != null) {
            SqlResult.enqueue(sqlResult);
        }
        sqlResult = null;
    }
    return sqlResult;
}

From source file:uk.ac.cam.cl.dtg.segue.api.AdminFacade.java

/**
 * Get the event data for a specified user, in CSV format.
 *
 * @param request
 *            - request information used for authentication
 * @param requestForCaching
 *            - request information used for caching.
 * @param httpServletRequest
 *            - the request which may contain session information.
 * @param fromDate
 *            - date to start search
 * @param toDate
 *            - date to end search
 * @param events
 *            - comma-separated list of events of interest.
 * @param bin
 *            - Should we group data into the first day of the month? true or false.
 * @return Returns a map of eventType to Map of dates to total number of events.
 */
@GET
@Path("/users/event_data/over_time/download")
@Produces("text/csv")
@GZIP
public Response getEventDataForAllUsersDownloadCSV(@Context final Request request,
        @Context final HttpServletRequest httpServletRequest, @Context final Request requestForCaching,
        @QueryParam("from_date") final Long fromDate, @QueryParam("to_date") final Long toDate,
        @QueryParam("events") final String events, @QueryParam("bin_data") final Boolean bin) {

    try {
        Map<String, Map<LocalDate, Long>> eventLogsByDate;
        eventLogsByDate = fetchEventDataForAllUsers(request, httpServletRequest, requestForCaching, fromDate,
                toDate, events, bin);

        StringWriter stringWriter = new StringWriter();
        CSVWriter csvWriter = new CSVWriter(stringWriter);
        List<String[]> rows = Lists.newArrayList();
        rows.add(new String[] { "event_type", "timestamp", "value" });

        for (Map.Entry<String, Map<LocalDate, Long>> eventType : eventLogsByDate.entrySet()) {
            String eventTypeKey = eventType.getKey();
            for (Map.Entry<LocalDate, Long> record : eventType.getValue().entrySet()) {
                rows.add(new String[] { eventTypeKey, record.getKey().toString(),
                        record.getValue().toString() });
            }
        }
        csvWriter.writeAll(rows);
        csvWriter.close();

        EntityTag etag = new EntityTag(eventLogsByDate.toString().hashCode() + "");
        Response cachedResponse = generateCachedResponse(requestForCaching, etag);
        if (cachedResponse != null) {
            return cachedResponse;
        }

        return Response.ok(stringWriter.toString()).tag(etag)
                .header("Content-Disposition", "attachment; filename=admin_stats.csv")
                .cacheControl(getCacheControl(NUMBER_SECONDS_IN_FIVE_MINUTES, false)).build();
    } catch (BadRequestException e) {
        return new SegueErrorResponse(Status.BAD_REQUEST, e.getMessage()).toResponse();
    } catch (ForbiddenException e) {
        return new SegueErrorResponse(Status.FORBIDDEN, e.getMessage()).toResponse();
    } catch (NoUserLoggedInException e) {
        return SegueErrorResponse.getNotLoggedInResponse();
    } catch (SegueDatabaseException e) {
        log.error("Database error while getting event details for a user.", e);
        return new SegueErrorResponse(Status.INTERNAL_SERVER_ERROR, "Unable to complete the request.")
                .toResponse();
    } catch (IOException e) {
        log.error("IO error while creating the CSV file.", e);
        return new SegueErrorResponse(Status.INTERNAL_SERVER_ERROR, "Error while creating the CSV file")
                .toResponse();
    }
}

From source file:com.alibaba.wasp.master.FMaster.java

public boolean balance() throws IOException {
    // if master not initialized, don't run balancer.
    if (!this.initialized) {
        LOG.debug("Master has not been initialized, don't run balancer.");
        return false;
    }
    // If the balancer is not enabled, don't run it.
    if (!this.loadBalancerTracker.isBalancerOn())
        return false;
    // Do this call outside of synchronized block.
    int maximumBalanceTime = getBalancerCutoffTime();
    long cutoffTime = System.currentTimeMillis() + maximumBalanceTime;
    boolean balancerRan;
    synchronized (this.balancer) {
        // Only allow one balance run at a time.
        if (this.assignmentManager.getEntityGroupStates().isEntityGroupsInTransition()) {
            Map<String, EntityGroupState> entityGroupsInTransition = this.assignmentManager
                    .getEntityGroupStates().getEntityGroupsInTransition();
            LOG.debug("Not running balancer because " + entityGroupsInTransition.size()
                    + " entityGroup(s) in transition: "
                    + org.apache.commons.lang.StringUtils.abbreviate(entityGroupsInTransition.toString(), 256));
            return false;
        }
        if (this.serverManager.areDeadServersInProgress()) {
            LOG.debug("Not running balancer because processing dead fserver(s): "
                    + this.serverManager.getDeadServers());
            return false;
        }

        Map<String, Map<ServerName, List<EntityGroupInfo>>> assignmentsByTable = this.assignmentManager
                .getEntityGroupStates().getAssignmentsByTable();

        List<EntityGroupPlan> plans = new ArrayList<EntityGroupPlan>();
        // Give the balancer the current cluster state.
        this.balancer.setClusterStatus(getClusterStatus());
        for (Map<ServerName, List<EntityGroupInfo>> assignments : assignmentsByTable.values()) {
            List<EntityGroupPlan> partialPlans = this.balancer.balanceCluster(assignments);
            if (partialPlans != null)
                plans.addAll(partialPlans);
        }
        int rpCount = 0; // number of EntityGroupPlans balanced so far
        long totalRegPlanExecTime = 0;
        balancerRan = plans != null;
        if (plans != null && !plans.isEmpty()) {
            for (EntityGroupPlan plan : plans) {
                LOG.info("balance " + plan);
                long balStartTime = System.currentTimeMillis();
                this.assignmentManager.balance(plan);
                totalRegPlanExecTime += System.currentTimeMillis() - balStartTime;
                rpCount++;
                if (rpCount < plans.size() &&
                // if performing next balance exceeds cutoff time, exit the loop
                        (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
                    LOG.debug("No more balancing till next balance run; maximumBalanceTime="
                            + maximumBalanceTime);
                    break;
                }
            }
        }
    }
    return balancerRan;
}

From source file:org.kuali.kfs.module.purap.service.impl.ElectronicInvoiceHelperServiceImpl.java

protected PaymentRequestDocument createPaymentRequest(ElectronicInvoiceOrderHolder orderHolder) {

    if (LOG.isInfoEnabled()) {
        LOG.info("Creating Payment Request document");
    }

    KNSGlobalVariables.getMessageList().clear();

    validateInvoiceOrderValidForPREQCreation(orderHolder);

    if (LOG.isInfoEnabled()) {
        if (orderHolder.isInvoiceRejected()) {
            LOG.info("Not possible to convert einvoice details into payment request");
        } else {
            LOG.info("Payment request document creation validation succeeded");
        }
    }

    if (orderHolder.isInvoiceRejected()) {
        return null;
    }

    PaymentRequestDocument preqDoc = null;
    try {
        preqDoc = (PaymentRequestDocument) SpringContext.getBean(DocumentService.class).getNewDocument("PREQ");
    } catch (WorkflowException e) {
        String extraDescription = "Error=" + e.getMessage();
        ElectronicInvoiceRejectReason rejectReason = matchingService.createRejectReason(
                PurapConstants.ElectronicInvoice.PREQ_WORKLOW_EXCEPTION, extraDescription,
                orderHolder.getFileName());
        orderHolder.addInvoiceOrderRejectReason(rejectReason);
        LOG.error("Error creating Payment request document - " + e.getMessage());
        return null;
    }

    PurchaseOrderDocument poDoc = orderHolder.getPurchaseOrderDocument();
    if (poDoc == null) {
        throw new RuntimeException("Purchase Order document (POId=" + poDoc.getPurapDocumentIdentifier()
                + ") does not exist in the system");
    }

    preqDoc.getDocumentHeader().setDocumentDescription(generatePREQDocumentDescription(poDoc));
    try {
        preqDoc.updateAndSaveAppDocStatus(PurapConstants.PaymentRequestStatuses.APPDOC_IN_PROCESS);
    } catch (WorkflowException we) {
        throw new RuntimeException(
                "Unable to save route status data for document: " + preqDoc.getDocumentNumber(), we);
    }

    preqDoc.setInvoiceDate(orderHolder.getInvoiceDate());
    preqDoc.setInvoiceNumber(orderHolder.getInvoiceNumber());
    preqDoc.setVendorInvoiceAmount(new KualiDecimal(orderHolder.getInvoiceNetAmount()));
    preqDoc.setAccountsPayableProcessorIdentifier("E-Invoice");
    preqDoc.setVendorCustomerNumber(orderHolder.getCustomerNumber());
    preqDoc.setPaymentRequestElectronicInvoiceIndicator(true);

    if (orderHolder.getAccountsPayablePurchasingDocumentLinkIdentifier() != null) {
        preqDoc.setAccountsPayablePurchasingDocumentLinkIdentifier(
                orderHolder.getAccountsPayablePurchasingDocumentLinkIdentifier());
    }

    //Copied from PaymentRequestServiceImpl.populatePaymentRequest()
    //set bank code to default bank code in the system parameter
    Bank defaultBank = SpringContext.getBean(BankService.class).getDefaultBankByDocType(preqDoc.getClass());
    if (defaultBank != null) {
        preqDoc.setBankCode(defaultBank.getBankCode());
        preqDoc.setBank(defaultBank);
    }

    RequisitionDocument reqDoc = SpringContext.getBean(RequisitionService.class)
            .getRequisitionById(poDoc.getRequisitionIdentifier());
    String reqDocInitiator = reqDoc.getDocumentHeader().getWorkflowDocument().getInitiatorPrincipalId();
    try {
        Person user = KimApiServiceLocator.getPersonService().getPerson(reqDocInitiator);

        setProcessingCampus(preqDoc, user.getCampusCode());

    } catch (Exception e) {
        String extraDescription = "Error setting processing campus code - " + e.getMessage();
        ElectronicInvoiceRejectReason rejectReason = matchingService.createRejectReason(
                PurapConstants.ElectronicInvoice.PREQ_ROUTING_VALIDATION_ERROR, extraDescription,
                orderHolder.getFileName());
        orderHolder.addInvoiceOrderRejectReason(rejectReason);
        return null;
    }

    HashMap<String, ExpiredOrClosedAccountEntry> expiredOrClosedAccountList = SpringContext
            .getBean(AccountsPayableService.class).expiredOrClosedAccountsList(poDoc);
    if (expiredOrClosedAccountList == null) {
        expiredOrClosedAccountList = new HashMap();
    }

    if (LOG.isInfoEnabled()) {
        LOG.info(expiredOrClosedAccountList.size() + " accounts have been found as Expired or Closed");
    }

    preqDoc.populatePaymentRequestFromPurchaseOrder(orderHolder.getPurchaseOrderDocument(),
            expiredOrClosedAccountList);

    populateItemDetails(preqDoc, orderHolder);

    /**
     * Validate totals, pay date
     */
    //PaymentRequestDocumentRule.processCalculateAccountsPayableBusinessRules
    SpringContext.getBean(KualiRuleService.class)
            .applyRules(new AttributedCalculateAccountsPayableEvent(preqDoc));

    SpringContext.getBean(PaymentRequestService.class).calculatePaymentRequest(preqDoc, true);

    processItemsForDiscount(preqDoc, orderHolder);

    if (orderHolder.isInvoiceRejected()) {
        return null;
    }

    SpringContext.getBean(PaymentRequestService.class).calculatePaymentRequest(preqDoc, false);
    /**
     * PaymentRequestReview
     */
    //PaymentRequestDocumentRule.processRouteDocumentBusinessRules
    SpringContext.getBean(KualiRuleService.class)
            .applyRules(new AttributedPaymentRequestForEInvoiceEvent(preqDoc));

    if (GlobalVariables.getMessageMap().hasErrors()) {
        if (LOG.isInfoEnabled()) {
            LOG.info("***************Error in rules processing - " + GlobalVariables.getMessageMap());
        }
        Map<String, AutoPopulatingList<ErrorMessage>> errorMessages = GlobalVariables.getMessageMap()
                .getErrorMessages();

        String errors = errorMessages.toString();
        ElectronicInvoiceRejectReason rejectReason = matchingService.createRejectReason(
                PurapConstants.ElectronicInvoice.PREQ_ROUTING_VALIDATION_ERROR, errors,
                orderHolder.getFileName());
        orderHolder.addInvoiceOrderRejectReason(rejectReason);
        return null;
    }

    if (KNSGlobalVariables.getMessageList().size() > 0) {
        if (LOG.isInfoEnabled()) {
            LOG.info("Payment request contains " + KNSGlobalVariables.getMessageList().size()
                    + " warning message(s)");
            for (int i = 0; i < KNSGlobalVariables.getMessageList().size(); i++) {
                LOG.info("Warning " + i + "  - " + KNSGlobalVariables.getMessageList().get(i));
            }
        }
    }

    addShipToNotes(preqDoc, orderHolder);

    String routingAnnotation = null;
    if (!orderHolder.isRejectDocumentHolder()) {
        routingAnnotation = "Routed by electronic invoice batch job";
    }

    try {
        SpringContext.getBean(DocumentService.class).routeDocument(preqDoc, routingAnnotation, null);
    } catch (WorkflowException e) {
        e.printStackTrace();
        ElectronicInvoiceRejectReason rejectReason = matchingService.createRejectReason(
                PurapConstants.ElectronicInvoice.PREQ_ROUTING_FAILURE, e.getMessage(),
                orderHolder.getFileName());
        orderHolder.addInvoiceOrderRejectReason(rejectReason);
        return null;
    } catch (ValidationException e) {
        String extraDescription = GlobalVariables.getMessageMap().toString();
        ElectronicInvoiceRejectReason rejectReason = matchingService.createRejectReason(
                PurapConstants.ElectronicInvoice.PREQ_ROUTING_VALIDATION_ERROR, extraDescription,
                orderHolder.getFileName());
        orderHolder.addInvoiceOrderRejectReason(rejectReason);
        return null;
    }

    return preqDoc;
}

From source file:org.apache.eagle.alert.metric.MetricSystemTest.java

private SimpleConsumer assertMsgFromKafka(KafkaEmbedded kafkaEmbedded) throws IOException {
    SimpleConsumer consumer = new SimpleConsumer("localhost", kafkaEmbedded.getPort(), 100000, 64 * 1024,
            clientName);
    long readOffset = getLastOffset(consumer, TOPIC, 0, kafka.api.OffsetRequest.EarliestTime(), clientName);
    FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(TOPIC, 0, readOffset, 100000)
            .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    Map<Integer, Map<String, String>> resultCollector = new HashMap<>();
    int count = 1;
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(TOPIC, 0)) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < readOffset) {
            System.out.println("found an old offset: " + currentOffset + " expecting: " + readOffset);
            continue;
        }

        readOffset = messageAndOffset.nextOffset();
        ByteBuffer payload = messageAndOffset.message().payload();

        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        String message = new String(bytes, "UTF-8");
        Map<String, String> convertedMsg = JsonUtils.mapper.readValue(message, Map.class);
        convertedMsg.remove("timestamp");
        resultCollector.put(count, convertedMsg);
        count++;
    }
    Assert.assertEquals(
            "{1={name=heap.committed, value=175636480}, 2={name=heap.init, value=262144000}, 3={name=heap.max, value=3704094720}, 4={name=heap.usage, value=0.01570181876990446}, 5={name=heap.used, value=58491576}, 6={name=name, value=testname}, 7={name=non-heap.committed, value=36405248}, 8={name=non-heap.init, value=2555904}, 9={name=non-heap.max, value=-1}, 10={name=non-heap.usage, value=-3.5588712E7}, 11={name=non-heap.used, value=35596496}, 12={name=pools.Code-Cache.usage, value=0.020214080810546875}, 13={name=pools.Compressed-Class-Space.usage, value=0.0035556256771087646}, 14={name=pools.Metaspace.usage, value=0.9777212526244751}, 15={name=pools.PS-Eden-Space.usage, value=0.03902325058129612}, 16={name=pools.PS-Old-Gen.usage, value=0.001959359247654333}, 17={name=pools.PS-Survivor-Space.usage, value=0.0}, 18={name=total.committed, value=212107264}, 19={name=total.init, value=264699904}, 20={name=total.max, value=3704094719}, 21={name=total.used, value=94644240}, 22={name=uptime, value=testuptime}, 23={name=vendor, value=testvendor}}",
            resultCollector.toString());
    return consumer;
}

From source file:com.github.podd.resources.UploadArtifactResourceImpl.java

private InferredOWLOntologyID uploadFileAndLoadArtifactIntoPodd(final Representation entity)
        throws ResourceException {
    List<FileItem> items;
    Path filePath = null;
    String contentType = null;

    // 1: Create a factory for disk-based file items
    final DiskFileItemFactory factory = new DiskFileItemFactory(1000240, this.tempDirectory.toFile());

    // 2: Create a new file upload handler
    final RestletFileUpload upload = new RestletFileUpload(factory);
    final Map<String, String> props = new HashMap<String, String>();
    try {
        // 3: Request is parsed by the handler which generates a list of
        // FileItems
        items = upload.parseRequest(this.getRequest());

        for (final FileItem fi : items) {
            final String name = fi.getName();

            if (name == null) {
                props.put(fi.getFieldName(), new String(fi.get(), StandardCharsets.UTF_8));
            } else {
                // FIXME: Strip everything up to the last . out of the
                // filename so that
                // the filename can be used for content type determination
                // where
                // possible.
                // InputStream uploadedFileInputStream =
                // fi.getInputStream();
                try {
                    // Note: These are Java-7 APIs
                    contentType = fi.getContentType();
                    props.put("Content-Type", fi.getContentType());

                    filePath = Files.createTempFile(this.tempDirectory, "ontologyupload-", name);
                    final File file = filePath.toFile();
                    file.deleteOnExit();
                    fi.write(file);
                } catch (final IOException ioe) {
                    throw ioe;
                } catch (final Exception e) {
                    // avoid throwing a generic exception just because the
                    // apache
                    // commons library throws Exception
                    throw new IOException(e);
                }
            }
        }
    } catch (final IOException | FileUploadException e) {
        throw new ResourceException(Status.CLIENT_ERROR_BAD_REQUEST, e);
    }

    this.log.info("props={}", props.toString());

    if (filePath == null) {
        throw new ResourceException(Status.CLIENT_ERROR_BAD_REQUEST,
                "Did not submit a valid file and filename");
    }

    this.log.info("filename={}", filePath.toAbsolutePath().toString());
    this.log.info("contentType={}", contentType);

    RDFFormat format = null;

    // If the content type was application/octet-stream then use the file
    // name instead
    // Browsers attach this content type when they are not sure what the
    // real type is
    if (MediaType.APPLICATION_OCTET_STREAM.getName().equals(contentType)) {
        format = Rio.getParserFormatForFileName(filePath.getFileName().toString());

        this.log.info("octet-stream contentType filename format={}", format);
    }
    // Otherwise use the content type directly in preference to using the
    // filename
    else if (contentType != null) {
        format = Rio.getParserFormatForMIMEType(contentType);

        this.log.info("non-octet-stream contentType format={}", format);
    }

    // If the content type choices failed to resolve the type, then try the
    // filename
    if (format == null) {
        format = Rio.getParserFormatForFileName(filePath.getFileName().toString());

        this.log.info("non-content-type filename format={}", format);
    }

    // Or fallback to RDF/XML which at minimum is able to detect when the
    // document is
    // structurally invalid
    if (format == null) {
        this.log.warn("Could not determine RDF format from request so falling back to RDF/XML");
        format = RDFFormat.RDFXML;
    }

    try (final InputStream inputStream = new BufferedInputStream(
            Files.newInputStream(filePath, StandardOpenOption.READ));) {
        return this.uploadFileAndLoadArtifactIntoPodd(inputStream, format, DanglingObjectPolicy.REPORT,
                DataReferenceVerificationPolicy.DO_NOT_VERIFY);
    } catch (final IOException e) {
        throw new ResourceException(Status.SERVER_ERROR_INTERNAL, "File IO error occurred", e);
    }

}

From source file:org.opendaylight.ovsdb.plugin.ConfigurationService.java

public void _addPort(CommandInterpreter ci) {
    String nodeName = ci.nextArgument();
    if (nodeName == null) {
        ci.println("Please enter Node Name");
        return;
    }

    String bridgeName = ci.nextArgument();
    if (bridgeName == null) {
        ci.println("Please enter Bridge Name");
        return;
    }

    String portName = ci.nextArgument();
    if (portName == null) {
        ci.println("Please enter Port Name");
        return;
    }

    String type = ci.nextArgument();

    Map<String, String> configs = new HashMap<String, String>();
    while (true) {
        String configKey = ci.nextArgument();
        if (configKey == null)
            break;
        String configValue = ci.nextArgument();
        if (configValue == null)
            break;
        configs.put(configKey, configValue);
    }

    Map<ConfigConstants, Object> customConfigs = null;
    if (type != null) {
        customConfigs = new HashMap<ConfigConstants, Object>();
        customConfigs.put(ConfigConstants.TYPE, type);
    }

    if (configs.size() > 0) {
        if (customConfigs == null)
            customConfigs = new HashMap<ConfigConstants, Object>();
        customConfigs.put(ConfigConstants.CUSTOM, configs);
        ci.println(customConfigs.toString());
    }
    Status status;
    Node node = Node.fromString(nodeName);
    if (node == null) {
        ci.println("Invalid Node");
        return;
    }
    status = this.addPort(node, bridgeName, portName, customConfigs);
    ci.println("Port creation status : " + status.toString());
}

From source file:org.apache.solr.TestDistributedSearch.java

@Test
public void test() throws Exception {
    QueryResponse rsp = null;
    int backupStress = stress; // make a copy so we can restore

    del("*:*");
    indexr(id, 1, i1, 100, tlong, 100, t1, "now is the time for all good men", "foo_sev_enum", "Medium",
            tdate_a, "2010-04-20T11:00:00Z", tdate_b, "2009-08-20T11:00:00Z", "foo_f", 1.414f, "foo_b", "true",
            "foo_d", 1.414d, s1, "z${foo}");
    indexr(id, 2, i1, 50, tlong, 50, t1, "to come to the aid of their country.", "foo_sev_enum", "Medium",
            "foo_sev_enum", "High", tdate_a, "2010-05-02T11:00:00Z", tdate_b, "2009-11-02T11:00:00Z", s1,
            "z${foo}");
    indexr(id, 3, i1, 2, tlong, 2, t1, "how now brown cow", tdate_a, "2010-05-03T11:00:00Z", s1, "z${foo}");
    indexr(id, 4, i1, -100, tlong, 101, t1, "the quick fox jumped over the lazy dog", tdate_a,
            "2010-05-03T11:00:00Z", tdate_b, "2010-05-03T11:00:00Z", s1, "a");
    indexr(id, 5, i1, 500, tlong, 500, t1, "the quick fox jumped way over the lazy dog", tdate_a,
            "2010-05-05T11:00:00Z", s1, "b");
    indexr(id, 6, i1, -600, tlong, 600, t1, "humpty dumpy sat on a wall", s1, "c");
    indexr(id, 7, i1, 123, tlong, 123, t1, "humpty dumpy had a great fall", s1, "d");
    indexr(id, 8, i1, 876, tlong, 876, tdate_b, "2010-01-05T11:00:00Z", "foo_sev_enum", "High", t1,
            "all the kings horses and all the kings men", s1, "e");
    indexr(id, 9, i1, 7, tlong, 7, t1, "couldn't put humpty together again", s1, "f");

    commit(); // try to ensure there's more than one segment

    indexr(id, 10, i1, 4321, tlong, 4321, t1, "this too shall pass", s1, "g");
    indexr(id, 11, i1, -987, tlong, 987, "foo_sev_enum", "Medium", t1,
            "An eye for eye only ends up making the whole world blind.", s1, "h");
    indexr(id, 12, i1, 379, tlong, 379, t1, "Great works are performed, not by strength, but by perseverance.",
            s1, "i");
    indexr(id, 13, i1, 232, tlong, 232, t1, "no eggs on wall, lesson learned", oddField, "odd man out", s1,
            "j");

    indexr(id, "1001", "lowerfilt", "toyota", s1, "k"); // for spellcheck

    indexr(id, 14, "SubjectTerms_mfacet", new String[] { "mathematical models", "mathematical analysis" }, s1,
            "l");
    indexr(id, 15, "SubjectTerms_mfacet", new String[] { "test 1", "test 2", "test3" });
    indexr(id, 16, "SubjectTerms_mfacet", new String[] { "test 1", "test 2", "test3" });
    String[] vals = new String[100];
    for (int i = 0; i < 100; i++) {
        vals[i] = "test " + i;
    }
    indexr(id, 17, "SubjectTerms_mfacet", vals);

    for (int i = 100; i < 150; i++) {
        indexr(id, i);
    }

    commit();

    handle.clear();
    handle.put("timestamp", SKIPVAL);
    handle.put("_version_", SKIPVAL); // not a cloud test, but may use updateLog

    //Test common query parameters.
    validateCommonQueryParameters();

    // random value sort
    for (String f : fieldNames) {
        query("q", "*:*", "sort", f + " desc");
        query("q", "*:*", "sort", f + " asc");
    }

    // these queries should be exactly ordered and scores should exactly match
    query("q", "*:*", "sort", i1 + " desc");
    query("q", "*:*", "sort", "{!func}testfunc(add(" + i1 + ",5))" + " desc");
    query("q", "*:*", "sort", i1 + " asc");
    query("q", "*:*", "sort", i1 + " desc", "fl", "*,score");
    query("q", "*:*", "sort", "n_tl1 asc", "fl", "*,score");
    query("q", "*:*", "sort", "n_tl1 desc");
    handle.put("maxScore", SKIPVAL);
    query("q", "{!func}" + i1);// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList()
    //is agnostic of request params.
    handle.remove("maxScore");
    query("q", "{!func}" + i1, "fl", "*,score"); // even scores should match exactly here

    handle.put("highlighting", UNORDERED);
    handle.put("response", UNORDERED);

    handle.put("maxScore", SKIPVAL);
    query("q", "quick");
    query("q", "all", "fl", "id", "start", "0");
    query("q", "all", "fl", "foofoofoo", "start", "0"); // no fields in returned docs
    query("q", "all", "fl", "id", "start", "100");

    handle.put("score", SKIPVAL);
    query("q", "quick", "fl", "*,score");
    query("q", "all", "fl", "*,score", "start", "1");
    query("q", "all", "fl", "*,score", "start", "100");

    query("q", "now their fox sat had put", "fl", "*,score", "hl", "true", "hl.fl", t1);

    query("q", "now their fox sat had put", "fl", "foofoofoo", "hl", "true", "hl.fl", t1);

    query("q", "matchesnothing", "fl", "*,score");

    // test that a single NOW value is propagated to all shards... if that is true
    // then the primary sort should always be a tie and then the secondary should always decide
    query("q", "{!func}ms(NOW)", "sort", "score desc," + i1 + " desc", "fl", "id");

    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", t1, "facet.field", t1);
    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", t1, "facet.limit", 1);
    query("q", "*:*", "rows", 0, "facet", "true", "facet.query", "quick", "facet.query", "quick", "facet.query",
            "all", "facet.query", "*:*");
    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", t1, "facet.mincount", 2);

    // a facet query to test out chars out of the ascii range
    query("q", "*:*", "rows", 0, "facet", "true", "facet.query",
            "{!term f=foo_s}international\u00ff\u01ff\u2222\u3333");

    // simple field facet on date fields
    rsp = query("q", "*:*", "rows", 0, "facet", "true", "facet.limit", 1, // TODO: limit shouldn't be needed: SOLR-6386
            "facet.field", tdate_a);
    assertEquals(1, rsp.getFacetFields().size());
    rsp = query("q", "*:*", "rows", 0, "facet", "true", "facet.limit", 1, // TODO: limit shouldn't be needed: SOLR-6386
            "facet.field", tdate_b, "facet.field", tdate_a);
    assertEquals(2, rsp.getFacetFields().size());

    String facetQuery = "id:[1 TO 15]";

    // simple range facet on one field
    query("q", facetQuery, "rows", 100, "facet", "true", "facet.range", tlong, "facet.range", tlong,
            "facet.range.start", 200, "facet.range.gap", 100, "facet.range.end", 900, "facet.range.method",
            FacetRangeMethod.FILTER);

    // simple range facet on one field using dv method
    query("q", facetQuery, "rows", 100, "facet", "true", "facet.range", tlong, "facet.range", tlong,
            "facet.range.start", 200, "facet.range.gap", 100, "facet.range.end", 900, "facet.range.method",
            FacetRangeMethod.DV);

    // range facet on multiple fields
    query("q", facetQuery, "rows", 100, "facet", "true", "facet.range", tlong, "facet.range", i1,
            "f." + i1 + ".facet.range.start", 300, "f." + i1 + ".facet.range.gap", 87, "facet.range.end", 900,
            "facet.range.start", 200, "facet.range.gap", 100, "f." + tlong + ".facet.range.end", 900,
            "f." + i1 + ".facet.range.method", FacetRangeMethod.FILTER, "f." + tlong + ".facet.range.method",
            FacetRangeMethod.DV);

    // range facet with "other" param
    QueryResponse response = query("q", facetQuery, "rows", 100, "facet", "true", "facet.range", tlong,
            "facet.range.start", 200, "facet.range.gap", 100, "facet.range.end", 900, "facet.range.other",
            "all");
    assertEquals(tlong, response.getFacetRanges().get(0).getName());
    assertEquals(new Integer(6), response.getFacetRanges().get(0).getBefore());
    assertEquals(new Integer(5), response.getFacetRanges().get(0).getBetween());
    assertEquals(new Integer(2), response.getFacetRanges().get(0).getAfter());

    // Test mincounts. Do NOT want to go through all the stuff with validateControlData in the query() method.
    // Purposely packing a _bunch_ of stuff together here to ensure that the proper level of mincount is used for
    // each
    ModifiableSolrParams minParams = new ModifiableSolrParams();
    minParams.set("q", "*:*");
    minParams.set("rows", 1);
    minParams.set("facet", "true");
    minParams.set("facet.missing", "true");
    minParams.set("facet.field", i1);
    minParams.set("facet.missing", "true");
    minParams.set("facet.mincount", 2);

    // Return a separate section of ranges over i1. Should respect global range mincount
    minParams.set("facet.range", i1);
    minParams.set("f." + i1 + ".facet.range.start", 0);
    minParams.set("f." + i1 + ".facet.range.gap", 200);
    minParams.set("f." + i1 + ".facet.range.end", 1200);
    minParams.set("f." + i1 + ".facet.mincount", 4);

    // Return a separate section of ranges over tlong Should respect facet.mincount
    minParams.add("facet.range", tlong);
    minParams.set("f." + tlong + ".facet.range.start", 0);
    minParams.set("f." + tlong + ".facet.range.gap", 100);
    minParams.set("f." + tlong + ".facet.range.end", 1200);
    // Repeat with a range type of date
    minParams.add("facet.range", tdate_b);
    minParams.set("f." + tdate_b + ".facet.range.start", "2009-02-01T00:00:00Z");
    minParams.set("f." + tdate_b + ".facet.range.gap", "+1YEAR");
    minParams.set("f." + tdate_b + ".facet.range.end", "2011-01-01T00:00:00Z");
    minParams.set("f." + tdate_b + ".facet.mincount", 3);

    // Ensure that global mincount is respected for facet queries
    minParams.set("facet.query", tdate_a + ":[2010-01-01T00:00:00Z TO 2011-01-01T00:00:00Z]"); // Should return some counts
    //minParams.set("facet.query", tdate_a + ":[* TO *]"); // Should be removed
    minParams.add("facet.query", tdate_b + ":[2008-01-01T00:00:00Z TO 2009-09-01T00:00:00Z]"); // Should be removed from response

    setDistributedParams(minParams);
    QueryResponse minResp = queryServer(minParams);

    ModifiableSolrParams eParams = new ModifiableSolrParams();
    eParams.set("q", tdate_b + ":[* TO *]");
    eParams.set("rows", 1000);
    eParams.set("fl", tdate_b);
    setDistributedParams(eParams);
    QueryResponse eResp = queryServer(eParams);

    // Check that exactly the right numbers of counts came through
    assertEquals("Should be exactly 2 range facets returned after minCounts taken into account ", 3,
            minResp.getFacetRanges().size());
    assertEquals("Should only be 1 query facets returned after minCounts taken into account ", 1,
            minResp.getFacetQuery().size());

    checkMinCountsField(minResp.getFacetField(i1).getValues(), new Object[] { null, 55L }); // Should just be the null entries for field

    checkMinCountsRange(minResp.getFacetRanges().get(0).getCounts(), new Object[] { "0", 5L }); // range on i1
    checkMinCountsRange(minResp.getFacetRanges().get(1).getCounts(), new Object[] { "0", 3L, "100", 3L }); // range on tlong
    checkMinCountsRange(minResp.getFacetRanges().get(2).getCounts(),
            new Object[] { "2009-02-01T00:00:00Z", 3L }); // date (range) on tvh

    assertTrue("Should have a facet for tdate_a",
            minResp.getFacetQuery().containsKey("a_n_tdt:[2010-01-01T00:00:00Z TO 2011-01-01T00:00:00Z]"));
    int qCount = minResp.getFacetQuery().get("a_n_tdt:[2010-01-01T00:00:00Z TO 2011-01-01T00:00:00Z]");
    assertEquals("tdate_a should be 5", qCount, 5);

    // Now let's do some queries, the above is getting too complex
    minParams = new ModifiableSolrParams();
    minParams.set("q", "*:*");
    minParams.set("rows", 1);
    minParams.set("facet", "true");
    minParams.set("facet.mincount", 3);

    minParams.set("facet.query", tdate_a + ":[2010-01-01T00:00:00Z TO 2010-05-04T00:00:00Z]");
    minParams.add("facet.query", tdate_b + ":[2009-01-01T00:00:00Z TO 2010-01-01T00:00:00Z]"); // Should be removed
    setDistributedParams(minParams);
    minResp = queryServer(minParams);

    assertEquals("Should only be 1 query facets returned after minCounts taken into account ", 1,
            minResp.getFacetQuery().size());
    assertTrue("Should be an entry for a_n_tdt",
            minResp.getFacetQuery().containsKey("a_n_tdt:[2010-01-01T00:00:00Z TO 2010-05-04T00:00:00Z]"));
    qCount = minResp.getFacetQuery().get("a_n_tdt:[2010-01-01T00:00:00Z TO 2010-05-04T00:00:00Z]");
    assertEquals("a_n_tdt should have a count of 4 ", qCount, 4);
    //  variations of fl
    query("q", "*:*", "fl", "score", "sort", i1 + " desc");
    query("q", "*:*", "fl", i1 + ",score", "sort", i1 + " desc");
    query("q", "*:*", "fl", i1, "fl", "score", "sort", i1 + " desc");
    query("q", "*:*", "fl", "id," + i1, "sort", i1 + " desc");
    query("q", "*:*", "fl", "id", "fl", i1, "sort", i1 + " desc");
    query("q", "*:*", "fl", i1, "fl", "id", "sort", i1 + " desc");
    query("q", "*:*", "fl", "id", "fl", nint, "fl", tint, "sort", i1 + " desc");
    query("q", "*:*", "fl", nint, "fl", "id", "fl", tint, "sort", i1 + " desc");
    handle.put("did", SKIPVAL);
    query("q", "*:*", "fl", "did:[docid]", "sort", i1 + " desc");
    handle.remove("did");
    query("q", "*:*", "fl", "log(" + tlong + "),abs(" + tlong + "),score", "sort", i1 + " desc");
    query("q", "*:*", "fl", "n_*", "sort", i1 + " desc");

    // basic spellcheck testing
    query("q", "toyata", "fl", "id,lowerfilt", "spellcheck", true, "spellcheck.q", "toyata", "qt",
            "spellCheckCompRH_Direct", "shards.qt", "spellCheckCompRH_Direct");

    stress = 0; // turn off stress... we want to test max combos in min time
    for (int i = 0; i < 25 * RANDOM_MULTIPLIER; i++) {
        String f = fieldNames[random().nextInt(fieldNames.length)];
        if (random().nextBoolean())
            f = t1; // the text field is a really interesting one to facet on (and it's multi-valued too)

        // we want a random query and not just *:* so we'll get zero counts in facets also
        // TODO: do a better random query
        String q = random().nextBoolean() ? "*:*"
                : "id:(1 3 5 7 9 11 13) OR id:[100 TO " + random().nextInt(50) + "]";

        int nolimit = random().nextBoolean() ? -1 : 10000; // these should be equivalent

        // if limit==-1, we should always get exact matches
        query("q", q, "rows", 0, "facet", "true", "facet.field", f, "facet.limit", nolimit, "facet.sort",
                "count", "facet.mincount", random().nextInt(5), "facet.offset", random().nextInt(10));
        query("q", q, "rows", 0, "facet", "true", "facet.field", f, "facet.limit", nolimit, "facet.sort",
                "index", "facet.mincount", random().nextInt(5), "facet.offset", random().nextInt(10));
        // for index sort, we should get exact results for mincount <= 1
        query("q", q, "rows", 0, "facet", "true", "facet.field", f, "facet.sort", "index", "facet.mincount",
                random().nextInt(2), "facet.offset", random().nextInt(10), "facet.limit",
                random().nextInt(11) - 1);
    }
    stress = backupStress; // restore stress

    // test faceting multiple things at once
    query("q", "*:*", "rows", 0, "facet", "true", "facet.query", "quick", "facet.query", "all", "facet.query",
            "*:*", "facet.field", t1);

    // test filter tagging, facet exclusion, and naming (multi-select facet support)
    queryAndCompareUIF("q", "*:*", "rows", 0, "facet", "true", "facet.query", "{!key=myquick}quick",
            "facet.query", "{!key=myall ex=a}all", "facet.query", "*:*", "facet.field",
            "{!key=mykey ex=a}" + t1, "facet.field", "{!key=other ex=b}" + t1, "facet.field",
            "{!key=again ex=a,b}" + t1, "facet.field", t1, "fq", "{!tag=a}id:[1 TO 7]", "fq",
            "{!tag=b}id:[3 TO 9]");
    queryAndCompareUIF("q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq",
            "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1");

    // test field that is valid in schema but missing in all shards
    query("q", "*:*", "rows", 100, "facet", "true", "facet.field", missingField, "facet.mincount", 2);
    // test field that is valid in schema and missing in some shards
    query("q", "*:*", "rows", 100, "facet", "true", "facet.field", oddField, "facet.mincount", 2);

    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field", "stats_dt");
    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field", i1);

    handle.put("stddev", FUZZY);
    handle.put("sumOfSquares", FUZZY);
    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field", tdate_a);
    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field", tdate_b);
    handle.remove("stddev");
    handle.remove("sumOfSquares");

    rsp = query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field",
            "{!cardinality='true'}" + oddField, "stats.field", "{!cardinality='true'}" + tlong);

    { // don't leak variables

        // long
        FieldStatsInfo s = rsp.getFieldStatsInfo().get(tlong);
        assertNotNull("missing stats", s);
        assertEquals("wrong cardinality", new Long(13), s.getCardinality());
        //
        assertNull("expected null for min", s.getMin());
        assertNull("expected null for mean", s.getMean());
        assertNull("expected null for count", s.getCount());
        assertNull("expected null for calcDistinct", s.getCountDistinct());
        assertNull("expected null for distinct vals", s.getDistinctValues());
        assertNull("expected null for max", s.getMax());
        assertNull("expected null for missing", s.getMissing());
        assertNull("expected null for stddev", s.getStddev());
        assertNull("expected null for sum", s.getSum());
        assertNull("expected null for percentiles", s.getSum());

        // string
        s = rsp.getFieldStatsInfo().get(oddField);
        assertNotNull("missing stats", s);
        assertEquals("wrong cardinality", new Long(1), s.getCardinality());
        //
        assertNull("expected null for min", s.getMin());
        assertNull("expected null for mean", s.getMean());
        assertNull("expected null for count", s.getCount());
        assertNull("expected null for calcDistinct", s.getCountDistinct());
        assertNull("expected null for distinct vals", s.getDistinctValues());
        assertNull("expected null for max", s.getMax());
        assertNull("expected null for missing", s.getMissing());
        assertNull("expected null for stddev", s.getStddev());
        assertNull("expected null for sum", s.getSum());
        assertNull("expected null for percentiles", s.getSum());
    }

    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field", "{!percentiles='1,2,3,4,5'}" + i1);

    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field",
            "{!percentiles='1,20,30,40,98,99,99.9'}" + i1);

    rsp = query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field",
            "{!percentiles='1.0,99.999,0.001'}" + tlong);
    { // don't leak variables
        Double[] expectedKeys = new Double[] { 1.0D, 99.999D, 0.001D };
        Double[] expectedVals = new Double[] { 2.0D, 4320.0D, 2.0D };
        FieldStatsInfo s = rsp.getFieldStatsInfo().get(tlong);
        assertNotNull("no stats for " + tlong, s);

        Map<Double, Double> p = s.getPercentiles();
        assertNotNull("no percentils", p);
        assertEquals("insufficient percentiles", expectedKeys.length, p.size());
        Iterator<Double> actualKeys = p.keySet().iterator();
        for (int i = 0; i < expectedKeys.length; i++) {
            Double expectedKey = expectedKeys[i];
            assertTrue("Ran out of actual keys as of : " + i + "->" + expectedKey, actualKeys.hasNext());
            assertEquals(expectedKey, actualKeys.next());
            assertEquals("percentiles are off: " + p.toString(), expectedVals[i], p.get(expectedKey), 1.0D);
        }

        //
        assertNull("expected null for count", s.getMin());
        assertNull("expected null for count", s.getMean());
        assertNull("expected null for count", s.getCount());
        assertNull("expected null for calcDistinct", s.getCountDistinct());
        assertNull("expected null for distinct vals", s.getDistinctValues());
        assertNull("expected null for max", s.getMax());
        assertNull("expected null for missing", s.getMissing());
        assertNull("expected null for stddev", s.getStddev());
        assertNull("expected null for sum", s.getSum());
    }

    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field",
            "{!percentiles='1,20,50,80,99'}" + tdate_a);

    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "fq", "{!tag=nothing}-*:*", "stats.field",
            "{!key=special_key ex=nothing}stats_dt");
    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "f.stats_dt.stats.calcdistinct", "true",
            "stats.field", "{!key=special_key}stats_dt");
    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "f.stats_dt.stats.calcdistinct", "true", "fq",
            "{!tag=xxx}id:[3 TO 9]", "stats.field", "{!key=special_key}stats_dt", "stats.field",
            "{!ex=xxx}stats_dt");

    handle.put("stddev", FUZZY);
    handle.put("sumOfSquares", FUZZY);
    query("q", "*:*", "sort", i1 + " desc", "stats", "true",
            // do a really simple query so distributed IDF doesn't cause problems
            // when comparing with control collection
            "stats.field", "{!lucene key=q_key}" + i1 + "foo_b:true", "stats.field",
            "{!func key=f_key}sum(" + tlong + "," + i1 + ")");

    query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field", "stats_dt", "stats.field", i1,
            "stats.field", tdate_a, "stats.field", tdate_b);

    // only ask for "min" and "mean", explicitly exclude deps of mean, whitebox check shard responses
    try {
        RequestTrackingQueue trackingQueue = new RequestTrackingQueue();
        TrackingShardHandlerFactory.setTrackingQueue(jettys, trackingQueue);

        rsp = query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field",
                "{!min=true sum=false mean=true count=false}" + i1);
        FieldStatsInfo s = rsp.getFieldStatsInfo().get(i1);
        assertNotNull("no stats for " + i1, s);
        //
        assertEquals("wrong min", -987.0D, (Double) s.getMin(), 0.0001D);
        assertEquals("wrong mean", 377.153846D, (Double) s.getMean(), 0.0001D);
        //
        assertNull("expected null for count", s.getCount());
        assertNull("expected null for calcDistinct", s.getCountDistinct());
        assertNull("expected null for distinct vals", s.getDistinctValues());
        assertNull("expected null for max", s.getMax());
        assertNull("expected null for missing", s.getMissing());
        assertNull("expected null for stddev", s.getStddev());
        assertNull("expected null for sum", s.getSum());
        assertNull("expected null for percentiles", s.getPercentiles());
        assertNull("expected null for cardinality", s.getCardinality());

        // sanity check deps relationship
        for (Stat dep : EnumSet.of(Stat.sum, Stat.count)) {
            assertTrue("Purpose of this test is to ensure that asking for some stats works even when the deps "
                    + "of those stats are explicitly excluded -- but the expected dep relationshp is no longer valid. "
                    + "ie: who changed the code and didn't change this test?, expected: " + dep,
                    Stat.mean.getDistribDeps().contains(dep));
        }

        // check our shard requests & responses - ensure we didn't get unnecessary stats from every shard
        int numStatsShardRequests = 0;
        EnumSet<Stat> shardStatsExpected = EnumSet.of(Stat.min, Stat.sum, Stat.count);
        for (List<ShardRequestAndParams> shard : trackingQueue.getAllRequests().values()) {
            for (ShardRequestAndParams shardReq : shard) {
                if (shardReq.params.getBool(StatsParams.STATS, false)) {
                    numStatsShardRequests++;
                    for (ShardResponse shardRsp : shardReq.sreq.responses) {
                        NamedList<Object> shardStats = ((NamedList<NamedList<NamedList<Object>>>) shardRsp
                                .getSolrResponse().getResponse().get("stats")).get("stats_fields").get(i1);

                        assertNotNull("no stard stats for " + i1, shardStats);
                        //
                        for (Map.Entry<String, Object> entry : shardStats) {
                            Stat found = Stat.forName(entry.getKey());
                            assertNotNull("found shardRsp stat key we were not expecting: " + entry, found);
                            assertTrue("found stat we were not expecting: " + entry,
                                    shardStatsExpected.contains(found));

                        }
                    }
                }
            }
        }
        assertTrue("did't see any stats=true shard requests", 0 < numStatsShardRequests);
    } finally {
        TrackingShardHandlerFactory.setTrackingQueue(jettys, null);
    }

    // only ask for "min", "mean" and "stddev",
    rsp = query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field",
            "{!min=true mean=true stddev=true}" + i1);
    { // don't leak variables 
        FieldStatsInfo s = rsp.getFieldStatsInfo().get(i1);
        assertNotNull("no stats for " + i1, s);
        //
        assertEquals("wrong min", -987.0D, (Double) s.getMin(), 0.0001D);
        assertEquals("wrong mean", 377.153846D, (Double) s.getMean(), 0.0001D);
        assertEquals("wrong stddev", 1271.76215D, (Double) s.getStddev(), 0.0001D);
        //
        assertNull("expected null for count", s.getCount());
        assertNull("expected null for calcDistinct", s.getCountDistinct());
        assertNull("expected null for distinct vals", s.getDistinctValues());
        assertNull("expected null for max", s.getMax());
        assertNull("expected null for missing", s.getMissing());
        assertNull("expected null for sum", s.getSum());
        assertNull("expected null for percentiles", s.getPercentiles());
        assertNull("expected null for cardinality", s.getCardinality());
    }

    // request stats, but disable them all via param refs
    rsp = query("q", "*:*", "sort", i1 + " desc", "stats", "true", "doMin", "false", "stats.field",
            "{!min=$doMin}" + i1);
    { // don't leak variables 
        FieldStatsInfo s = rsp.getFieldStatsInfo().get(i1);
        // stats section should exist, even though stats should be null
        assertNotNull("no stats for " + i1, s);
        //
        assertNull("expected null for min", s.getMin());
        assertNull("expected null for mean", s.getMean());
        assertNull("expected null for stddev", s.getStddev());
        //
        assertNull("expected null for count", s.getCount());
        assertNull("expected null for calcDistinct", s.getCountDistinct());
        assertNull("expected null for distinct vals", s.getDistinctValues());
        assertNull("expected null for max", s.getMax());
        assertNull("expected null for missing", s.getMissing());
        assertNull("expected null for sum", s.getSum());
        assertNull("expected null for percentiles", s.getPercentiles());
        assertNull("expected null for cardinality", s.getCardinality());
    }

    final String[] stats = new String[] { "min", "max", "sum", "sumOfSquares", "stddev", "mean", "missing",
            "count" };

    // ask for arbitrary pairs of stats
    for (String stat1 : stats) {
        for (String stat2 : stats) {
            // NOTE: stat1 might equal stat2 - good edge case to test for

            rsp = query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field",
                    "{!" + stat1 + "=true " + stat2 + "=true}" + i1);

            final List<String> statsExpected = new ArrayList<String>(2);
            statsExpected.add(stat1);
            if (!stat1.equals(stat2)) {
                statsExpected.add(stat2);
            }

            // ignore the FieldStatsInfo convenience class, and look directly at the NamedList
            // so we don't need any sort of crazy reflection
            NamedList<Object> svals = ((NamedList<NamedList<NamedList<Object>>>) rsp.getResponse().get("stats"))
                    .get("stats_fields").get(i1);

            assertNotNull("no stats for field " + i1, svals);
            assertEquals("wrong quantity of stats", statsExpected.size(), svals.size());

            for (String s : statsExpected) {
                assertNotNull("stat shouldn't be null: " + s, svals.get(s));
                assertTrue("stat should be a Number: " + s + " -> " + svals.get(s).getClass(),
                        svals.get(s) instanceof Number);
                // some loose assertions since we're iterating over various stats
                if (svals.get(s) instanceof Double) {
                    Double val = (Double) svals.get(s);
                    assertFalse("stat shouldn't be NaN: " + s, val.isNaN());
                    assertFalse("stat shouldn't be Inf: " + s, val.isInfinite());
                    assertFalse("stat shouldn't be 0: " + s, val.equals(0.0D));
                } else {
                    // count or missing
                    assertTrue("stat should be count of missing: " + s,
                            ("count".equals(s) || "missing".equals(s)));
                    assertTrue("stat should be a Long: " + s + " -> " + svals.get(s).getClass(),
                            svals.get(s) instanceof Long);
                    Long val = (Long) svals.get(s);
                    assertFalse("stat shouldn't be 0: " + s, val.equals(0L));
                }
            }
        }
    }

    // all of these diff ways of asking for min & calcdistinct should have the same result
    for (SolrParams p : new SolrParams[] { params("stats.field", "{!min=true calcdistinct=true}" + i1),
            params("stats.calcdistinct", "true", "stats.field", "{!min=true}" + i1),
            params("f." + i1 + ".stats.calcdistinct", "true", "stats.field", "{!min=true}" + i1),
            params("stats.calcdistinct", "false", "f." + i1 + ".stats.calcdistinct", "true", "stats.field",
                    "{!min=true}" + i1),
            params("stats.calcdistinct", "false", "f." + i1 + ".stats.calcdistinct", "false", "stats.field",
                    "{!min=true calcdistinct=true}" + i1),
            params("stats.calcdistinct", "false", "f." + i1 + ".stats.calcdistinct", "false", "stats.field",
                    "{!min=true countDistinct=true distinctValues=true}" + i1),
            params("stats.field", "{!min=true countDistinct=true distinctValues=true}" + i1), params("yes",
                    "true", "stats.field", "{!min=$yes countDistinct=$yes distinctValues=$yes}" + i1), }) {

        rsp = query(SolrParams.wrapDefaults(p, params("q", "*:*", "sort", i1 + " desc", "stats", "true")));
        FieldStatsInfo s = rsp.getFieldStatsInfo().get(i1);
        assertNotNull(p + " no stats for " + i1, s);
        //
        assertEquals(p + " wrong min", -987.0D, (Double) s.getMin(), 0.0001D);
        assertEquals(p + " wrong calcDistinct", new Long(13), s.getCountDistinct());
        assertNotNull(p + " expected non-null list for distinct vals", s.getDistinctValues());
        assertEquals(p + " expected list for distinct vals", 13, s.getDistinctValues().size());
        //
        assertNull(p + " expected null for mean", s.getMean());
        assertNull(p + " expected null for count", s.getCount());
        assertNull(p + " expected null for max", s.getMax());
        assertNull(p + " expected null for missing", s.getMissing());
        assertNull(p + " expected null for stddev", s.getStddev());
        assertNull(p + " expected null for sum", s.getSum());
        assertNull(p + " expected null for percentiles", s.getPercentiles());
        assertNull(p + " expected null for cardinality", s.getCardinality());

    }

    // all of these diff ways of excluding calcdistinct should have the same result
    for (SolrParams p : new SolrParams[] { params("stats.field", "{!min=true calcdistinct=false}" + i1),
            params("stats.calcdistinct", "false", "stats.field", "{!min=true}" + i1),
            params("f." + i1 + ".stats.calcdistinct", "false", "stats.field", "{!min=true}" + i1),
            params("stats.calcdistinct", "true", "f." + i1 + ".stats.calcdistinct", "false", "stats.field",
                    "{!min=true}" + i1),
            params("stats.calcdistinct", "true", "f." + i1 + ".stats.calcdistinct", "true", "stats.field",
                    "{!min=true calcdistinct=false}" + i1),
            params("stats.calcdistinct", "true", "f." + i1 + ".stats.calcdistinct", "true", "stats.field",
                    "{!min=true countDistinct=false distinctValues=false}" + i1), }) {

        rsp = query(SolrParams.wrapDefaults(p, params("q", "*:*", "sort", i1 + " desc", "stats", "true")));
        FieldStatsInfo s = rsp.getFieldStatsInfo().get(i1);
        assertNotNull(p + " no stats for " + i1, s);
        //
        assertEquals(p + " wrong min", -987.0D, (Double) s.getMin(), 0.0001D);
        //
        assertNull(p + " expected null for calcDistinct", s.getCountDistinct());
        assertNull(p + " expected null for distinct vals", s.getDistinctValues());
        //
        assertNull(p + " expected null for mean", s.getMean());
        assertNull(p + " expected null for count", s.getCount());
        assertNull(p + " expected null for max", s.getMax());
        assertNull(p + " expected null for missing", s.getMissing());
        assertNull(p + " expected null for stddev", s.getStddev());
        assertNull(p + " expected null for sum", s.getSum());
        assertNull(p + " expected null for percentiles", s.getPercentiles());
        assertNull(p + " expected null for cardinality", s.getCardinality());
    }

    // this field doesn't exist in any doc in the result set.
    // ensure we get expected values for the stats we ask for, but null for the stats we did not request
    rsp = query("q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field",
            "{!min=true mean=true stddev=true}does_not_exist_i");
    { // don't leak variables 
        FieldStatsInfo s = rsp.getFieldStatsInfo().get("does_not_exist_i");
        assertNotNull("no stats for bogus field", s);

        // things we explicit expect because we asked for them
        // NOTE: min is expected to be null even though requested because of no values
        assertEquals("wrong min", null, s.getMin());
        assertTrue("mean should be NaN", ((Double) s.getMean()).isNaN());
        assertEquals("wrong stddev", 0.0D, (Double) s.getStddev(), 0.0D);

        // things that we didn't ask for, so they better be null
        assertNull("expected null for count", s.getCount());
        assertNull("expected null for calcDistinct", s.getCountDistinct());
        assertNull("expected null for distinct vals", s.getDistinctValues());
        assertNull("expected null for max", s.getMax());
        assertNull("expected null for missing", s.getMissing());
        assertNull("expected null for sum", s.getSum());
        assertNull("expected null for percentiles", s.getPercentiles());
        assertNull("expected null for cardinality", s.getCardinality());
    }

    // look at stats on non numeric fields
    //
    // not all stats are supported on every field type, so some of these permutations will 
    // result in no stats being computed, but this at least lets us sanity check that for each
    // of these field+stats(s) combinations we get consistent results between the distributed
    // request and the single node situation.
    //
    // NOTE: percentiles excluded because it doesn't support simple 'true/false' syntax
    // (and since it doesn't work for non-numerics anyway, we aren't missing any coverage here)
    EnumSet<Stat> allStats = EnumSet.complementOf(EnumSet.of(Stat.percentiles));

    int numTotalStatQueries = 0;
    // don't go overboard, just do all permutations of 1 or 2 stat params, for each field & query
    final int numStatParamsAtOnce = 2;
    for (int numParams = 1; numParams <= numStatParamsAtOnce; numParams++) {
        for (EnumSet<Stat> set : new StatSetCombinations(numParams, allStats)) {

            for (String field : new String[] { "foo_f", i1, tlong, tdate_a, oddField, "foo_sev_enum",
                    // fields that no doc has any value in
                    "bogus___s", "bogus___f", "bogus___i", "bogus___tdt", "bogus___sev_enum" }) {

                for (String q : new String[] { "*:*", // all docs
                        "bogus___s:bogus", // no docs
                        "id:" + random().nextInt(50), // 0 or 1 doc...
                        "id:" + random().nextInt(50), "id:" + random().nextInt(100),
                        "id:" + random().nextInt(100), "id:" + random().nextInt(200) }) {

                    // EnumSets use natural ordering; we want to randomize the order of the params
                    List<Stat> combo = new ArrayList<Stat>(set);
                    Collections.shuffle(combo, random());

                    StringBuilder paras = new StringBuilder("{!key=k ");

                    for (Stat stat : combo) {
                        paras.append(stat + "=true ");
                    }

                    paras.append("}").append(field);
                    numTotalStatQueries++;
                    rsp = query("q", q, "rows", "0", "stats", "true", "stats.field", paras.toString());
                    // simple assert, mostly relying on comparison with single shard
                    FieldStatsInfo s = rsp.getFieldStatsInfo().get("k");
                    assertNotNull(s);

                    // TODO: if we had a programmatic way to determine what stats are supported
                    // by what field types, we could make more confident asserts here.
                }
            }
        }
    }
    handle.remove("stddev");
    handle.remove("sumOfSquares");
    assertEquals("Sanity check failed: either test broke, or test changed, or you adjusted Stat enum"
            + " (adjust constant accordingly if intentional)", 5082, numTotalStatQueries);

    /*** TODO: the failure may come back in "exception"
    try {
      // test error produced for field that is invalid for schema
      query("q","*:*", "rows",100, "facet","true", "facet.field",invalidField, "facet.mincount",2);
      TestCase.fail("SolrServerException expected for invalid field that is not in schema");
    } catch (SolrServerException ex) {
      // expected
    }
    ***/

    // Try to get better coverage for refinement queries by turning off over-requesting.
    // This makes it much more likely that we may not get the top facet values, and hence
    // we turn off that checking.
    handle.put("facet_fields", SKIPVAL);
    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", t1, "facet.limit", 5, "facet.shard.limit", 5);
    // check a complex key name
    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", "{!key='$a b/c \\' \\} foo'}" + t1,
            "facet.limit", 5, "facet.shard.limit", 5);
    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", "{!key='$a'}" + t1, "facet.limit", 5,
            "facet.shard.limit", 5);
    handle.remove("facet_fields");
    // Make sure there is no macro expansion for field values
    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", s1, "facet.limit", 5, "facet.shard.limit", 5);
    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", s1, "facet.limit", 5, "facet.shard.limit", 5,
            "expandMacros", "true");
    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", s1, "facet.limit", 5, "facet.shard.limit", 5,
            "expandMacros", "false");
    // Macro expansion should still work for the parameters
    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", "${foo}", "f.${foo}.mincount", 1, "foo", s1);
    query("q", "*:*", "rows", 0, "facet", "true", "facet.field", "${foo}", "f.${foo}.mincount", 1, "foo", s1,
            "expandMacros", "true");

    // index the same document to two servers and make sure things
    // don't blow up.
    if (clients.size() >= 2) {
        index(id, 100, i1, 107, t1, "oh no, a duplicate!");
        for (int i = 0; i < clients.size(); i++) {
            index_specific(i, id, 100, i1, 107, t1, "oh no, a duplicate!");
        }
        commit();
        query("q", "duplicate", "hl", "true", "hl.fl", t1);
        query("q", "fox duplicate horses", "hl", "true", "hl.fl", t1);
        query("q", "*:*", "rows", 100);
    }

    //SOLR 3161 ensure shards.qt=/update fails (anything but search handler really)
    // Also see TestRemoteStreaming#testQtUpdateFails()
    try {
        ignoreException("isShard is only acceptable");
        // query("q","*:*","shards.qt","/update","stream.body","<delete><query>*:*</query></delete>");
        // fail();
    } catch (SolrException e) {
        //expected
    }
    unIgnoreException("isShard is only acceptable");

    // test debugging
    // handle.put("explain", UNORDERED);
    handle.put("explain", SKIPVAL); // internal docids differ, idf differs w/o global idf
    handle.put("debug", UNORDERED);
    handle.put("time", SKIPVAL);
    handle.put("track", SKIP); //track is not included in single node search
    query("q", "now their fox sat had put", "fl", "*,score", CommonParams.DEBUG_QUERY, "true");
    query("q", "id:[1 TO 5]", CommonParams.DEBUG_QUERY, "true");
    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING);
    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS);
    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY);

    // SOLR-6545, wild card field list
    indexr(id, "19", "text", "d", "cat_a_sS", "1", t1, "2");
    commit();

    rsp = query("q", "id:19", "fl", "id", "fl", "*a_sS");
    assertFieldValues(rsp.getResults(), "id", 19);

    rsp = query("q", "id:19", "fl", "id," + t1 + ",cat*");
    assertFieldValues(rsp.getResults(), "id", 19);

    // Check that shard info is added for each shard
    ModifiableSolrParams q = new ModifiableSolrParams();
    q.set("q", "*:*");
    q.set(ShardParams.SHARDS_INFO, true);
    setDistributedParams(q);
    rsp = queryServer(q);
    NamedList<?> sinfo = (NamedList<?>) rsp.getResponse().get(ShardParams.SHARDS_INFO);
    String shards = getShardsString();
    int cnt = StringUtils.countMatches(shards, ",") + 1;

    assertNotNull("missing shard info", sinfo);
    assertEquals("should have an entry for each shard [" + sinfo + "] " + shards, cnt, sinfo.size());

    // test shards.tolerant=true
    for (int numDownServers = 0; numDownServers < jettys.size() - 1; numDownServers++) {
        List<JettySolrRunner> upJettys = new ArrayList<>(jettys);
        List<SolrClient> upClients = new ArrayList<>(clients);
        List<JettySolrRunner> downJettys = new ArrayList<>();
        List<String> upShards = new ArrayList<>(Arrays.asList(shardsArr));
        for (int i = 0; i < numDownServers; i++) {
            // shut down some of the jettys
            int indexToRemove = r.nextInt(upJettys.size());
            JettySolrRunner downJetty = upJettys.remove(indexToRemove);
            upClients.remove(indexToRemove);
            upShards.remove(indexToRemove);
            ChaosMonkey.stop(downJetty);
            downJettys.add(downJetty);
        }

        queryPartialResults(upShards, upClients, "q", "*:*", "facet", "true", "facet.field", t1, "facet.field",
                t1, "facet.limit", 5, ShardParams.SHARDS_INFO, "true", ShardParams.SHARDS_TOLERANT, "true");

        queryPartialResults(upShards, upClients, "q", "*:*", "facet", "true", "facet.query", i1 + ":[1 TO 50]",
                "facet.query", i1 + ":[1 TO 50]", ShardParams.SHARDS_INFO, "true", ShardParams.SHARDS_TOLERANT,
                "true");

        // test group query
        queryPartialResults(upShards, upClients, "q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true",
                "group.query", t1 + ":kings OR " + t1 + ":eggs", "group.limit", 10, "sort", i1 + " asc, id asc",
                CommonParams.TIME_ALLOWED, 1, ShardParams.SHARDS_INFO, "true", ShardParams.SHARDS_TOLERANT,
                "true");

        queryPartialResults(upShards, upClients, "q", "*:*", "stats", "true", "stats.field", i1,
                ShardParams.SHARDS_INFO, "true", ShardParams.SHARDS_TOLERANT, "true");

        queryPartialResults(upShards, upClients, "q", "toyata", "spellcheck", "true", "spellcheck.q", "toyata",
                "qt", "spellCheckCompRH_Direct", "shards.qt", "spellCheckCompRH_Direct",
                ShardParams.SHARDS_INFO, "true", ShardParams.SHARDS_TOLERANT, "true");

        // restart the jettys
        for (JettySolrRunner downJetty : downJettys) {
            ChaosMonkey.start(downJetty);
        }
    }

    // This index has the same number for every field

    // TODO: This test currently fails because debug info is obtained only
    // on shards with matches.
    // query("q","matchesnothing","fl","*,score", "debugQuery", "true");

    // Thread.sleep(10000000000L);

    del("*:*"); // delete all docs and test stats request
    commit();
    try {
        query("q", "*:*", "stats", "true", "stats.field", "stats_dt", "stats.field", i1, "stats.field", tdate_a,
                "stats.field", tdate_b, "stats.calcdistinct", "true");
    } catch (HttpSolrClient.RemoteSolrException e) {
        if (e.getMessage().startsWith("java.lang.NullPointerException")) {
            fail("NullPointerException with stats request on empty index");
        } else {
            throw e;
        }
    }

    String fieldName = "severity";
    indexr("id", "1", fieldName, "Not Available");
    indexr("id", "2", fieldName, "Low");
    indexr("id", "3", fieldName, "Medium");
    indexr("id", "4", fieldName, "High");
    indexr("id", "5", fieldName, "Critical");

    commit();

    rsp = query("q", "*:*", "stats", "true", "stats.field", fieldName);
    assertEquals(new EnumFieldValue(0, "Not Available"), rsp.getFieldStatsInfo().get(fieldName).getMin());
    query("q", "*:*", "stats", "true", "stats.field", fieldName, StatsParams.STATS_CALC_DISTINCT, "true");
    assertEquals(new EnumFieldValue(4, "Critical"), rsp.getFieldStatsInfo().get(fieldName).getMax());

    handle.put("severity", UNORDERED); // this is stupid, but stats.facet doesn't garuntee order
    query("q", "*:*", "stats", "true", "stats.field", fieldName, "stats.facet", fieldName);
}
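
In the percentile checks above, the failure message is built as "percentiles are off: " + p.toString(), leaning on Map.toString() to dump the whole percentile map into the assertion text. A minimal standalone sketch of that pattern, using plain JDK types and made-up values (the class name and numbers below are hypothetical, not from the test), might look like this:

import java.util.LinkedHashMap;
import java.util.Map;

public class PercentileToStringSketch {
    public static void main(String[] args) {
        // LinkedHashMap keeps insertion order, mirroring percentile keys arriving in request order
        Map<Double, Double> percentiles = new LinkedHashMap<>();
        percentiles.put(1.0, 2.0);
        percentiles.put(99.999, 4320.0);
        percentiles.put(0.001, 2.0);

        double expected = 4320.0;
        double actual = percentiles.get(99.999);
        if (Math.abs(expected - actual) > 1.0) {
            // Map.toString() yields a readable {key=value, ...} dump for the failure message
            throw new AssertionError("percentiles are off: " + percentiles.toString());
        }
        System.out.println("percentiles look sane: " + percentiles);
    }
}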

From source file:com.vivastream.security.oauth2.provider.token.store.DynamoDBTokenStore.java

private Collection<OAuth2AccessToken> loadTokensByClientAndUserIndex(Map<String, Condition> keyCondition,
        boolean filterOutNullUsers) {
    List<OAuth2AccessToken> accessTokens = new ArrayList<OAuth2AccessToken>();

    List<String> accessTokenIds = null;
    try {
        accessTokenIds = dynamoDBTemplate.query(schema.getAccessTableName(),
                schema.getAccessIndexClientIdAndUserName(), keyCondition, //
                new ObjectExtractor<String>() {

                    public String extract(Map<String, AttributeValue> values) {
                        return values.get(schema.getAccessColumnTokenId()).getS();
                    }
                }, schema.getAccessColumnTokenId());

        List<Map<String, AttributeValue>> keys = new ArrayList<Map<String, AttributeValue>>(
                accessTokenIds.size());
        for (String accessTokenId : accessTokenIds) {
            keys.add(Collections.singletonMap(schema.getAccessColumnTokenId(),
                    new AttributeValue(accessTokenId)));
        }
        if (filterOutNullUsers) {
            accessTokens = dynamoDBTemplate.batchGet(schema.getAccessTableName(), // 
                    new KeysAndAttributes().withKeys(keys).withConsistentRead(true).withAttributesToGet(
                            schema.getAccessColumnTokenId(), schema.getAccessColumnToken(),
                            schema.getAccessColumnIsNullUser()), // 
                    new NonNullUserSafeAccessTokenExtractor());
        } else {
            accessTokens = dynamoDBTemplate.batchGet(schema.getAccessTableName(), // 
                    new KeysAndAttributes().withKeys(keys).withConsistentRead(true).withAttributesToGet(
                            schema.getAccessColumnTokenId(), schema.getAccessColumnToken()), // 
                    new SafeAccessTokenExtractor());
        }
    } catch (EmptyResultDataAccessException e) {
        if (LOG.isInfoEnabled()) {
            LOG.info("Failed to find access token for " + keyCondition.toString());
        }
    }
    accessTokens = removeNulls(accessTokens);

    return accessTokens;
}
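
The catch block above builds its log line with keyCondition.toString(), letting Map.toString() describe the DynamoDB key condition that matched nothing. A minimal sketch of the same logging idiom, with plain String values standing in for the AWS SDK Condition type (the class name and values below are hypothetical), could look like:

import java.util.LinkedHashMap;
import java.util.Map;

public class KeyConditionLogSketch {
    public static void main(String[] args) {
        // stand-in for Map<String, Condition>; plain strings keep the sketch self-contained
        Map<String, String> keyCondition = new LinkedHashMap<>();
        keyCondition.put("clientId", "EQ my-client");
        keyCondition.put("userName", "EQ alice");

        // Map.toString() renders {clientId=EQ my-client, userName=EQ alice},
        // which is the shape that ends up in the "Failed to find access token for ..." log line
        System.out.println("Failed to find access token for " + keyCondition.toString());
    }
}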

From source file:at.ac.tuwien.dsg.cloud.salsa.engine.smartdeployment.main.SmartDeploymentService.java

private String enrich_CAMF_CSAR_Process(String csarTmp, String serviceName) {
    String extractedFolder = csarTmp + ".extracted";
    String toscaFile = extractedFolder + "/Definitions/Application.tosca";
    String scriptDir = extractedFolder + "/Scripts/";
    try {
        // extract CSAR
        CSARParser.extractCsar(new File(csarTmp), extractedFolder);

        // enrich with QUELLE for
        String toscaXML = FileUtils.readFileToString(new File(toscaFile));
        EngineLogger.logger.debug("Read tosca string done. 100 first characters: {}", toscaXML);
        EngineLogger.logger.debug("Now trying to enrich with QUELLE....");
        //enrichCAMFToscaWithQuelle(toscaXML, serviceName, new String[]{EnrichFunctions.QuelleCloudServiceRecommendation.toString(), EnrichFunctions.SalsaInfoCompletion.toString()});
        SmartDeploymentService sds = new SmartDeploymentService();
        String result = sds.enrichCAMFToscaWithQuelle(toscaXML, serviceName,
                new String[] { EnrichFunctions.QuelleCloudServiceRecommendation.toString() });
        EngineLogger.logger.debug("After enrich with QUELLE, the result is: {}", result);
        // write back to right place
        FileUtils.writeStringToFile(new File(toscaFile), result);

        // read software requirement in TOSCA for each node, put in a map + artifact
        // a map between node ID and full requirement in Tag
        Map<String, String> allRequirements = new HashMap<>();
        TDefinitions def = ToscaXmlProcess.readToscaFile(toscaFile);
        for (TNodeTemplate node : ToscaStructureQuery.getNodeTemplateList(def)) {
            EngineLogger.logger.debug("Checking node: {}", node.getId());
            String policiesStr = new String();
            if (node.getPolicies() != null) {
                EngineLogger.logger.debug("Found policies of node: " + node.getId() + "/" + node.getName());
                List<TPolicy> policies = node.getPolicies().getPolicy();
                for (TPolicy p : policies) {
                    if (p.getPolicyType().getLocalPart().equals("Requirement")
                            && p.getPolicyType().getPrefix().equals("SmartDeployment")) {
                        if (p.getName().startsWith("CONSTRAINT")) {
                            // TODO: parse SYBL policies
                        } else {
                            policiesStr += p.getName().trim();
                            if (!p.getName().trim().endsWith(";")) {
                                policiesStr += ";";
                                EngineLogger.logger.debug("polociesStr = {}", policiesStr);
                            }
                        }
                    }
                }
            }
            EngineLogger.logger.debug("Collected policies for node {} is : {}", node.getId(), policiesStr);
            allRequirements.put(node.getId(), policiesStr);
        }
        EngineLogger.logger.debug("In total, we got following requirements: " + allRequirements.toString());

        // Load dependency graph knowledge base
        String dependencyDataFile = SmartDeploymentService.class.getResource("/data/salsa.dependencygraph.xml")
                .getFile();
        SalsaStackDependenciesGraph depGraph = SalsaStackDependenciesGraph
                .fromXML(FileUtils.readFileToString(new File(dependencyDataFile)));

        // ENRICH SCRIPT
        // extract all the requirement, put into the hashmap
        for (Map.Entry<String, String> entry : allRequirements.entrySet()) {
            EngineLogger.logger.debug("Analyzing node: {}. Full policies string is: *** {} ***", entry.getKey(),
                    entry.getValue());

            // extract CARL Strings
            CharStream stream = new ANTLRInputStream(entry.getValue());
            CARLLexer lexer = new CARLLexer(stream);
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            CARLParser parser = new CARLParser(tokens);
            RequirementsContext requirementsContext = parser.requirements();

            ParseTreeWalker walker = new ParseTreeWalker(); // create standard walker
            CARLProgramListener extractor = new CARLProgramListener(parser);
            walker.walk(extractor, requirementsContext); // initiate walk of tree with listener    
            org.eclipse.camf.carl.model.Requirements requirements = extractor.getRequirements();

            HashMap<String, String> allReqsOfNode = new HashMap<>();
            ArrayList<String> checkList = new ArrayList<>();
            // os=Ubuntu; os:ver=12.04; sw=jre:1.7 ==> os=Ubuntu, 
            // here we flatten all the requirements of the node
            for (IRequirement req : requirements.getRequirements()) {
                EngineLogger.logger.debug("Irequirement: " + req.toString());
                if (req.getCategory().equals(RequirementCategory.SOFTWARE)) {
                    SoftwareRequirement swr = (SoftwareRequirement) req;
                    allReqsOfNode.put("sw", removeQuote(swr.getName()));
                    allReqsOfNode.put(removeQuote(swr.getName()) + ":ver", swr.getVersion().getVersion());
                    checkList.add(swr.getName());
                } else {
                    if (req.getCategory().equals(RequirementCategory.OPERATING_SYSTEM)) { // the system part is generated by quelle
                        OSRequirement osReq = (OSRequirement) req;
                        if (osReq.getName() != null) {
                            allReqsOfNode.put("os", removeQuote(osReq.getName()));
                        }
                        if (osReq.getVersion() != null) {
                            allReqsOfNode.put("os:ver", osReq.getVersion().getVersion());
                        }

                    }
                }
            }
            // find all the deployment scripts for all "sw" requirements
            LinkedList<String> listOfScripts = new LinkedList<>();
            EngineLogger.logger.debug("The node {} will be enriched based-on the requirements: {}",
                    entry.getKey(), checkList.toString());
            for (String swReq : checkList) {
                EngineLogger.logger.debug("Searching deployment script for software req: {}", swReq);
                SalsaStackDependenciesGraph theNode = depGraph.findNodeByName(swReq);
                EngineLogger.logger.debug("Node found: {}", theNode.getName());
                EngineLogger.logger.debug("All requirements: {}", allReqsOfNode.toString());

                LinkedList<String> tmp = theNode.searchDeploymentScriptTemplate(allReqsOfNode);
                if (tmp != null) {
                    listOfScripts.addAll(tmp);
                }
            }
            EngineLogger.logger.debug(listOfScripts.toString());

            // create a script to solve all dependencies first
            String nodeID = entry.getKey();
            String theDependencyScript = "#!/bin/bash \n\n######## Generated by the Decision Module to solve the software dependencies ######## \n\n";
            for (String appendScript : listOfScripts) {
                String theAppend = SmartDeploymentService.class.getResource("/scriptRepo/" + appendScript)
                        .getFile();
                String stringToAppend = FileUtils.readFileToString(new File(theAppend));
                theDependencyScript += stringToAppend + "\n";
            }
            theDependencyScript += "######## End of generated script ########";
            String tmpScriptFile = scriptDir + "/" + nodeID + ".salsatmp";

            // read the original script and remove the #!/bin/bash header if present
            String originalScriptFile = null;
            TNodeTemplate node = ToscaStructureQuery.getNodetemplateById(nodeID, def);
            EngineLogger.logger.debug("Getting artifact template of node: {}", node.getId());
            for (TDeploymentArtifact art : node.getDeploymentArtifacts().getDeploymentArtifact()) {
                EngineLogger.logger.debug("Checking art.Name: {}, type: {}", art.getName(),
                        art.getArtifactType().getLocalPart());
                if (art.getArtifactType().getLocalPart().equals("ScriptArtifactPropertiesType")) {
                    String artTemplateID = art.getArtifactRef().getLocalPart();
                    TArtifactTemplate artTemplate = ToscaStructureQuery.getArtifactTemplateById(artTemplateID,
                            def);
                    if (artTemplate != null) {
                        originalScriptFile = artTemplate.getArtifactReferences().getArtifactReference().get(0)
                                .getReference();
                        originalScriptFile = extractedFolder + "/" + originalScriptFile;
                    }
                }
            }
            if (originalScriptFile != null) {
                String originalScript = FileUtils.readFileToString(new File(originalScriptFile));
                originalScript = originalScript.replace("#!/bin/bash", "");
                originalScript = originalScript.replace("#!/bin/sh", "");
                theDependencyScript += originalScript;
                FileUtils.writeStringToFile(new File(tmpScriptFile), theDependencyScript);
                EngineLogger.logger.debug("originalScript: {}, moveto: {}", originalScriptFile,
                        originalScriptFile + ".original");
                FileUtils.moveFile(FileUtils.getFile(originalScriptFile),
                        FileUtils.getFile(originalScriptFile + ".original"));
                FileUtils.moveFile(FileUtils.getFile(tmpScriptFile), FileUtils.getFile(originalScriptFile));
            } else {
                // TODO: there is no original script; add a new artifact template and put the tmp script into it
            }

        } // end for each node in allRequirements analysis

        // repack the CSAR
        FileUtils.deleteQuietly(FileUtils.getFile(csarTmp));
        File directory = new File(extractedFolder);
        File[] fList = directory.listFiles();

        //CSARParser.buildCSAR(fList, csarTmp);
        String builtCSAR = SalsaConfiguration.getToscaTemplateStorage() + "/" + serviceName + ".csar";
        CSARParser.buildCSAR(extractedFolder, builtCSAR);

    } catch (IOException ex) {
        EngineLogger.logger.error("Error when enriching CSAR: " + csarTmp, ex);
        return "Error";
    } catch (JAXBException ex) {
        EngineLogger.logger.error("Cannot parse the Tosca definition in CSAR file: " + toscaFile, ex);
        return "Error";
    }

    // return the link to the CSAR
    String csarURLReturn = SalsaConfiguration.getSalsaCenterEndpoint() + "/rest/smart/CAMFTosca/enrich/CSAR/"
            + serviceName;
    EngineLogger.logger.info("Enrich CSAR done. URL to download is: {}", csarURLReturn);
    return csarURLReturn;
}
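
Several debug statements in the method above (allRequirements.toString(), allReqsOfNode.toString(), checkList.toString()) rely on the default collection toString() output when tracing the enrichment steps. A minimal sketch of what the requirements-map dump looks like, with hypothetical node IDs and policy strings:

import java.util.HashMap;
import java.util.Map;

public class RequirementsDumpSketch {
    public static void main(String[] args) {
        // node ID -> flattened requirement string, as built while walking the TOSCA node templates
        Map<String, String> allRequirements = new HashMap<>();
        allRequirements.put("webServerNode", "sw=apache:2.4;os=Ubuntu;");
        allRequirements.put("dbNode", "sw=mysql:5.6;os:ver=12.04;");

        // HashMap.toString() prints {dbNode=..., webServerNode=...} in no guaranteed order,
        // fine for a debug log but not something to parse
        System.out.println("In total, we got the following requirements: " + allRequirements.toString());
    }
}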