Example usage for java.util LinkedList getLast

List of usage examples for java.util LinkedList getLast

Introduction

On this page you can find example usages of java.util.LinkedList getLast().

Prototype

public E getLast() 

Document

Returns the last element in this list.
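
Before the real-world examples, here is a minimal, self-contained sketch (the class name GetLastDemo is hypothetical) of what getLast() does: it peeks at the tail element without removing it, and throws NoSuchElementException on an empty list.

import java.util.LinkedList;
import java.util.NoSuchElementException;

public class GetLastDemo {
    public static void main(String[] args) {
        LinkedList<String> list = new LinkedList<String>();
        list.add("first");
        list.add("last");
        // getLast() peeks at the tail without removing it
        System.out.println(list.getLast()); // prints "last"
        System.out.println(list.size()); // still 2

        try {
            new LinkedList<String>().getLast();
        } catch (NoSuchElementException e) {
            // getLast() on an empty list throws NoSuchElementException
            System.out.println("empty list: " + e);
        }
    }
}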

Usage

From source file:com.zimbra.cs.service.mail.ToXML.java

private static void addParts(Element root, MPartInfo mpiRoot, Set<MPartInfo> bodies, String prefix, int maxSize,
        boolean neuter, boolean excludeCalendarParts, String defaultCharset, boolean swallowContentExceptions,
        MsgContent wantContent) throws ServiceException {
    MPartInfo mpi = mpiRoot;
    LinkedList<Pair<Element, LinkedList<MPartInfo>>> queue = new LinkedList<Pair<Element, LinkedList<MPartInfo>>>();
    Pair<Element, LinkedList<MPartInfo>> level = new Pair<Element, LinkedList<MPartInfo>>(root,
            new LinkedList<MPartInfo>());
    level.getSecond().add(mpi);
    queue.add(level);

    VisitPhase phase = VisitPhase.PREVISIT;
    while (!queue.isEmpty()) {
        level = queue.getLast();
        LinkedList<MPartInfo> parts = level.getSecond();
        if (parts.isEmpty()) {
            queue.removeLast();
            phase = VisitPhase.POSTVISIT;
            continue;
        }

        mpi = parts.getFirst();
        Element child = addPart(phase, level.getFirst(), root, mpi, bodies, prefix, maxSize, neuter,
                excludeCalendarParts, defaultCharset, swallowContentExceptions, wantContent);
        if (phase == VisitPhase.PREVISIT && child != null && mpi.hasChildren()) {
            queue.addLast(new Pair<Element, LinkedList<MPartInfo>>(child,
                    new LinkedList<MPartInfo>(mpi.getChildren())));
        } else {
            parts.removeFirst();
            phase = VisitPhase.PREVISIT;
        }
    }
}
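
In the example above, the LinkedList serves as an explicit stack for iterative traversal of the MIME part tree: queue.getLast() peeks at the deepest pending level, and removeLast() pops it once its parts are exhausted. A stripped-down sketch of that pattern (the Node type is hypothetical, not part of the Zimbra code):

import java.util.LinkedList;

class Node {
    String name;
    LinkedList<Node> children = new LinkedList<Node>();
    Node(String name) { this.name = name; }
}

public class IterativeTraversal {
    // Depth-first traversal without recursion: the tail of the outer list is the stack top.
    static void visit(Node root) {
        LinkedList<LinkedList<Node>> stack = new LinkedList<LinkedList<Node>>();
        LinkedList<Node> top = new LinkedList<Node>();
        top.add(root);
        stack.add(top);
        while (!stack.isEmpty()) {
            LinkedList<Node> level = stack.getLast(); // peek at the current level
            if (level.isEmpty()) {
                stack.removeLast(); // pop the exhausted level
                continue;
            }
            Node n = level.removeFirst();
            System.out.println(n.name);
            if (!n.children.isEmpty()) {
                stack.addLast(new LinkedList<Node>(n.children));
            }
        }
    }
}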

From source file:tokyo.northside.jrst.JRSTLexer.java

/**
 * .. _frungible doodads: http://www.example.org/
 * @return Element
 * @throws IOException
 */
public LinkedList<Element> refTarget() throws IOException {
    beginPeek();

    String[] lines = in.readAll();
    LinkedList<Element> result = new LinkedList<Element>();
    for (String line : lines) {
        if (line.matches("^\\s*\\.\\.\\s_[^_:].+:.*")) {
            result.add(DocumentHelper.createElement(TARGET));
            Matcher matcher = Pattern.compile("\\.\\.\\s_").matcher(line);
            if (matcher.find()) {
                boolean done = false;
                for (int i = matcher.end(); i < line.length() && !done; i++) {
                    if (line.charAt(i) == ':') {
                        result.getLast().addAttribute(LEVEL, "" + level(line));
                        result.getLast().addAttribute(ID, URLEncoder.encode(
                                line.substring(matcher.end(), i).replaceAll(" ", "-").toLowerCase(), "UTF-8"));
                        result.getLast().addAttribute(NAME, line.substring(matcher.end(), i).toLowerCase());
                        if (i + 2 > line.length()) {
                            line = in.readLine();
                            // FIXME 20071129 chatellier
                            // line == null if the link is not well formed, e.g.
                            // .. _Unifying types and classes in Python:
                            // is missing its uri
                            if (line == null) {
                                line = "";
                            }
                            result.getLast().addAttribute(REFURI, line.trim());
                        } else {
                            result.getLast().addAttribute(REFURI, line.substring(i + 2, line.length()));
                        }

                        done = true;
                    }
                }
            }
        }
    }
    endPeek();
    return result;
}
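
Note the idiom in the example above: each new element is appended with add(), and every following addAttribute() call reaches it through result.getLast(), so the most recently added element is decorated in place. A minimal sketch of the same append-then-decorate pattern (hypothetical, not from JRST):

import java.util.LinkedList;

public class AppendThenDecorate {
    public static void main(String[] args) {
        LinkedList<StringBuilder> targets = new LinkedList<StringBuilder>();
        targets.add(new StringBuilder("target"));
        // getLast() keeps returning the element just appended,
        // so attributes can be accumulated on it step by step.
        targets.getLast().append(" level=1");
        targets.getLast().append(" id=frungible-doodads");
        System.out.println(targets.getLast()); // target level=1 id=frungible-doodads
    }
}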

From source file:org.eclipse.wb.internal.core.model.description.helpers.ComponentDescriptionHelper.java

/**
 * @param editor
 *          the {@link AstEditor} in context of which we work now.
 * @param key
 *          the {@link ComponentDescriptionKey} of requested {@link ComponentDescription}.
 * @param additionalDescriptionInfos
 *          additional {@link ClassResourceInfo}'s to parse after {@link ClassResourceInfo}'s
 *          collected for component {@link Class}. May be empty, but not <code>null</code>.
 * 
 * @return the {@link ComponentDescription} of component with given {@link Class}.
 * @throws Exception
 *           if no {@link ComponentDescription} can be found.
 */
private static ComponentDescription getDescription0(AstEditor editor, ComponentDescriptionKey key,
        List<ClassResourceInfo> additionalDescriptionInfos) throws Exception {
    EditorState state = EditorState.get(editor);
    ILoadingContext context = EditorStateLoadingContext.get(state);
    Class<?> componentClass = key.getComponentClass();
    //
    try {
        // prepare result description
        ComponentDescription componentDescription = new ComponentDescription(key);
        addConstructors(editor.getJavaProject(), componentDescription);
        componentDescription.setBeanInfo(ReflectionUtils.getBeanInfo(componentClass));
        componentDescription.setBeanDescriptor(new IntrospectionHelper(componentClass).getBeanDescriptor());
        // prepare list of description resources, from generic to specific
        LinkedList<ClassResourceInfo> descriptionInfos;
        {
            descriptionInfos = Lists.newLinkedList();
            DescriptionHelper.addDescriptionResources(descriptionInfos, context, componentClass);
            Assert.isTrueException(!descriptionInfos.isEmpty(),
                    ICoreExceptionConstants.DESCRIPTION_NO_DESCRIPTIONS, componentClass.getName());
            // finally, append the additional description resources
            descriptionInfos.addAll(additionalDescriptionInfos);
        }
        // prepare Digester
        Digester digester;
        {
            digester = new Digester();
            digester.setLogger(new NoOpLog());
            addRules(digester, editor, componentClass);
        }
        // read descriptions from generic to specific
        for (ClassResourceInfo descriptionInfo : descriptionInfos) {
            ResourceInfo resourceInfo = descriptionInfo.resource;
            // read next description
            {
                componentDescription.setCurrentClass(descriptionInfo.clazz);
                digester.push(componentDescription);
                // do parse
                InputStream is = resourceInfo.getURL().openStream();
                try {
                    digester.parse(is);
                } finally {
                    IOUtils.closeQuietly(is);
                }
            }
            // clear parts that can not be inherited
            if (descriptionInfo.clazz == componentClass) {
                setDescriptionWithInnerTags(componentDescription, resourceInfo);
            } else {
                componentDescription.clearCreations();
                componentDescription.setDescription(null);
            }
        }
        // set toolkit
        if (componentDescription.getToolkit() == null) {
            for (int i = descriptionInfos.size() - 1; i >= 0; i--) {
                ClassResourceInfo descriptionInfo = descriptionInfos.get(i);
                ToolkitDescription toolkit = descriptionInfo.resource.getToolkit();
                if (toolkit != null) {
                    componentDescription.setToolkit(toolkit);
                    break;
                }
            }
            Assert.isTrueException(componentDescription.getToolkit() != null,
                    ICoreExceptionConstants.DESCRIPTION_NO_TOOLKIT, componentClass.getName());
        }
        // icon, default creation
        setIcon(context, componentDescription, componentClass);
        configureDefaultCreation(componentDescription);
        // final operations
        {
            Assert.isNotNull(componentDescription.getModelClass());
            componentDescription.joinProperties();
        }
        // add to caches
        if (key.isPureComponent() && !"true".equals(componentDescription.getParameter("dontCacheDescription"))
                && shouldCacheDescriptions_inPackage(descriptionInfos.getLast(), componentClass)) {
            componentDescription.setCached(true);
        }
        // mark for caching presentation
        if (shouldCachePresentation(descriptionInfos.getLast(), componentClass)) {
            componentDescription.setPresentationCached(true);
        }
        // use processors
        for (IDescriptionProcessor processor : getDescriptionProcessors()) {
            processor.process(editor, componentDescription);
        }
        // well, we have result
        return componentDescription;
    } catch (SAXParseException e) {
        throw new DesignerException(ICoreExceptionConstants.DESCRIPTION_LOAD_ERROR, e.getException(),
                componentClass.getName());
    }
}
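
In this example descriptionInfos is deliberately ordered from generic to specific, so descriptionInfos.getLast() always yields the resource closest to the component class itself; that most specific resource decides whether descriptions and presentations may be cached. A reduced sketch of the idea (the resource names are hypothetical, not the WindowBuilder API):

import java.util.LinkedList;

public class MostSpecificWins {
    public static void main(String[] args) {
        // Resources are appended from the most generic superclass downward...
        LinkedList<String> descriptions = new LinkedList<String>();
        descriptions.add("java.awt.Component.wbp-component.xml");
        descriptions.add("javax.swing.JComponent.wbp-component.xml");
        descriptions.add("javax.swing.JButton.wbp-component.xml");
        // ...so the tail is always the most specific description.
        System.out.println("deciding resource: " + descriptions.getLast());
    }
}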

From source file:org.epics.archiverappliance.retrieval.DataRetrievalServlet.java

private void doGetMultiPV(HttpServletRequest req, HttpServletResponse resp)
        throws ServletException, IOException {

    PoorMansProfiler pmansProfiler = new PoorMansProfiler();

    // Gets the list of PVs specified by the `pv` parameter
    // String arrays might be inefficient for retrieval. In any case, they are sorted, which is essential later on.
    List<String> pvNames = Arrays.asList(req.getParameterValues("pv"));

    // Ensuring that the AA has finished starting up before requests are accepted.
    if (configService.getStartupState() != STARTUP_SEQUENCE.STARTUP_COMPLETE) {
        String msg = "Cannot process data retrieval requests for specified PVs ("
                + StringUtils.join(pvNames, ", ") + ") until the appliance has completely started up.";
        logger.error(msg);
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, msg);
        return;
    }

    // Getting various fields from arguments
    String startTimeStr = req.getParameter("from");
    String endTimeStr = req.getParameter("to");
    boolean useReduced = false;
    String useReducedStr = req.getParameter("usereduced");
    if (useReducedStr != null && !useReducedStr.equals("")) {
        try {
            useReduced = Boolean.parseBoolean(useReducedStr);
        } catch (Exception ex) {
            logger.error("Exception parsing usereduced", ex);
            useReduced = false;
        }
    }

    // Getting MIME type
    String extension = req.getPathInfo().split("\\.")[1];
    logger.info("Mime is " + extension);

    if (!extension.equals("json") && !extension.equals("raw") && !extension.equals("jplot")
            && !extension.equals("qw")) {
        String msg = "Mime type " + extension + " is not supported. Please use \"json\", \"jplot\" or \"raw\".";
        resp.setHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
        return;
    }

    boolean useChunkedEncoding = true;
    String doNotChunkStr = req.getParameter("donotchunk");
    if (doNotChunkStr != null && !doNotChunkStr.equals("false")) {
        logger.info("Turning off HTTP chunked encoding");
        useChunkedEncoding = false;
    }

    boolean fetchLatestMetadata = false;
    String fetchLatestMetadataStr = req.getParameter("fetchLatestMetadata");
    if (fetchLatestMetadataStr != null && fetchLatestMetadataStr.equals("true")) {
        logger.info("Adding a call to the engine to fetch the latest metadata");
        fetchLatestMetadata = true;
    }

    // For data retrieval we need a PV info. However, in case of PV's that have long since retired, we may not want to have PVTypeInfo's in the system.
    // So, we support a template PV that lays out the data sources.
    // During retrieval, you can pass in the PV as a template and we'll clone this and make a temporary copy.
    String retiredPVTemplate = req.getParameter("retiredPVTemplate");

    // Go through the given PVs and return a bad request error if any are null.
    int nullPVs = 0;
    for (String pvName : pvNames) {
        if (pvName == null) {
            nullPVs++;
        }
        if (nullPVs > 0) {
            logger.warn("Some PVs are null in the request.");
            resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST);
            return;
        }
    }

    if (pvNames.toString().matches("^.*" + ARCH_APPL_PING_PV + ".*$")) {
        logger.debug("Processing ping PV - this is used to validate the connection with the client.");
        processPingPV(req, resp);
        return;
    }

    for (int i = 0; i < pvNames.size(); i++) {
        String pvName = pvNames.get(i);
        if (pvName.endsWith(".VAL")) {
            // Write the trimmed name back into the list; reassigning the
            // loop variable alone would not change the list contents.
            pvNames.set(i, pvName.substring(0, pvName.length() - 4));
            logger.info("Removing .VAL from pvName for request giving " + pvNames.get(i));
        }
    }

    // ISO datetimes are of the form "2011-02-02T08:00:00.000Z"
    Timestamp end = TimeUtils.plusHours(TimeUtils.now(), 1);
    if (endTimeStr != null) {
        try {
            end = TimeUtils.convertFromISO8601String(endTimeStr);
        } catch (IllegalArgumentException ex) {
            try {
                end = TimeUtils.convertFromDateTimeStringWithOffset(endTimeStr);
            } catch (IllegalArgumentException ex2) {
                String msg = "Cannot parse time " + endTimeStr;
                logger.warn(msg, ex2);
                resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
                resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
                return;
            }
        }
    }

    // We get one day by default
    Timestamp start = TimeUtils.minusDays(end, 1);
    if (startTimeStr != null) {
        try {
            start = TimeUtils.convertFromISO8601String(startTimeStr);
        } catch (IllegalArgumentException ex) {
            try {
                start = TimeUtils.convertFromDateTimeStringWithOffset(startTimeStr);
            } catch (IllegalArgumentException ex2) {
                String msg = "Cannot parse time " + startTimeStr;
                logger.warn(msg, ex2);
                resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
                resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
                return;
            }
        }
    }

    if (end.before(start)) {
        String msg = "For request, end " + end.toString() + " is before start " + start.toString() + " for pvs "
                + StringUtils.join(pvNames, ", ");
        logger.error(msg);
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
        return;
    }

    LinkedList<TimeSpan> requestTimes = new LinkedList<TimeSpan>();

    // We can specify a list of time stamp pairs using the optional timeranges parameter
    String timeRangesStr = req.getParameter("timeranges");
    if (timeRangesStr != null) {
        boolean continueWithRequest = parseTimeRanges(resp, "[" + StringUtils.join(pvNames, ", ") + "]",
                requestTimes, timeRangesStr);
        if (!continueWithRequest) {
            // Cannot parse the time ranges properly, so we abort the request.
            String msg = "The specified time ranges could not be processed appropriately. Aborting.";
            logger.info(msg);
            resp.setHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
            return;
        }

        // Override the start and the end so that the mergededup consumer works correctly.
        start = requestTimes.getFirst().getStartTime();
        end = requestTimes.getLast().getEndTime();

    } else {
        requestTimes.add(new TimeSpan(start, end));
    }

    assert (requestTimes.size() > 0);

    // Get a post processor for each PV specified in pvNames
    // If a PV is given in the form <pp>(<pv>), process it
    String postProcessorUserArg = req.getParameter("pp");
    List<String> postProcessorUserArgs = new ArrayList<>(pvNames.size());
    List<PostProcessor> postProcessors = new ArrayList<>(pvNames.size());
    for (int i = 0; i < pvNames.size(); i++) {
        postProcessorUserArgs.add(postProcessorUserArg);

        if (pvNames.get(i).contains("(")) {
            if (!pvNames.get(i).contains(")")) {
                String msg = "Unbalanced paren " + pvNames.get(i);
                logger.error(msg);
                resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
                resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
                return;
            }
            String[] components = pvNames.get(i).split("[(,)]");
            postProcessorUserArg = components[0];
            postProcessorUserArgs.set(i, postProcessorUserArg);
            pvNames.set(i, components[1]);
            if (components.length > 2) {
                for (int j = 2; j < components.length; j++) {
                    postProcessorUserArgs.set(i, postProcessorUserArgs.get(i) + "_" + components[j]);
                }
            }
            logger.info("After parsing the function call syntax pvName is " + pvNames.get(i)
                    + " and postProcessorUserArg is " + postProcessorUserArg);
        }
        postProcessors.add(PostProcessors.findPostProcessor(postProcessorUserArg));
    }

    List<PVTypeInfo> typeInfos = new ArrayList<PVTypeInfo>(pvNames.size());
    for (int i = 0; i < pvNames.size(); i++) {
        typeInfos.add(PVNames.determineAppropriatePVTypeInfo(pvNames.get(i), configService));
    }
    pmansProfiler.mark("After PVTypeInfo");

    for (int i = 0; i < pvNames.size(); i++)
        if (typeInfos.get(i) == null && RetrievalState.includeExternalServers(req)) {
            logger.debug(
                    "Checking to see if pv " + pvNames.get(i) + " is served by an external Archiver Server");
            typeInfos.set(i,
                    checkIfPVisServedByExternalServer(pvNames.get(i), start, req, resp, useChunkedEncoding));
        }

    for (int i = 0; i < pvNames.size(); i++) {
        if (typeInfos.get(i) == null) {
            // TODO Only needed if we're forwarding the request to another server.
            if (resp.isCommitted()) {
                logger.debug("Proxied the data thru an external server for PV " + pvNames.get(i));
                return;
            }

            if (retiredPVTemplate != null) {
                PVTypeInfo templateTypeInfo = PVNames.determineAppropriatePVTypeInfo(retiredPVTemplate,
                        configService);
                if (templateTypeInfo != null) {
                    typeInfos.set(i, new PVTypeInfo(pvNames.get(i), templateTypeInfo));
                    typeInfos.get(i).setPaused(true);
                    typeInfos.get(i).setApplianceIdentity(configService.getMyApplianceInfo().getIdentity());
                    // Somehow tell the code downstream that this is a fake typeInfo.
                    typeInfos.get(i).setSamplingMethod(SamplingMethod.DONT_ARCHIVE);
                    logger.debug("Using a template PV for " + pvNames.get(i)
                            + " Need to determine the actual DBR type.");
                    setActualDBRTypeFromData(pvNames.get(i), typeInfos.get(i), configService);
                }
            }
        }

        if (typeInfos.get(i) == null) {
            String msg = "Unable to find typeinfo for pv " + pvNames.get(i);
            logger.error(msg);
            resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
            resp.sendError(HttpServletResponse.SC_NOT_FOUND, msg);
            return;
        }

        if (postProcessors.get(i) == null) {
            if (useReduced) {
                String defaultPPClassName = configService.getInstallationProperties().getProperty(
                        "org.epics.archiverappliance.retrieval.DefaultUseReducedPostProcessor",
                        FirstSamplePP.class.getName());
                logger.debug("Using the default usereduced preprocessor " + defaultPPClassName);
                try {
                    postProcessors.set(i, (PostProcessor) Class.forName(defaultPPClassName).newInstance());
                } catch (Exception ex) {
                    logger.error("Exception constructing new instance of post processor " + defaultPPClassName,
                            ex);
                    postProcessors.set(i, null);
                }
            }
        }

        if (postProcessors.get(i) == null) {
            logger.debug("Using the default raw preprocessor");
            postProcessors.set(i, new DefaultRawPostProcessor());
        }
    }

    // Get the appliances for each of the PVs
    List<ApplianceInfo> applianceForPVs = new ArrayList<ApplianceInfo>(pvNames.size());
    for (int i = 0; i < pvNames.size(); i++) {
        applianceForPVs.add(configService.getApplianceForPV(pvNames.get(i)));
        if (applianceForPVs.get(i) == null) {
            // TypeInfo cannot be null here...
            assert (typeInfos.get(i) != null);
            applianceForPVs.set(i, configService.getAppliance(typeInfos.get(i).getApplianceIdentity()));
        }
    }

    /*
     * If a PV is not assigned to the current appliance, find the external appliance
     * serving it and group the PV's retrieval information under that appliance.
     */
    Map<String, ArrayList<PVInfoForClusterRetrieval>> applianceToPVs = new HashMap<String, ArrayList<PVInfoForClusterRetrieval>>();
    for (int i = 0; i < pvNames.size(); i++) {
        if (!applianceForPVs.get(i).equals(configService.getMyApplianceInfo())) {

            // Key by retrieval URL so the lookup matches the put() below.
            ArrayList<PVInfoForClusterRetrieval> appliancePVs = applianceToPVs
                    .get(applianceForPVs.get(i).getRetrievalURL());
            appliancePVs = (appliancePVs == null) ? new ArrayList<>() : appliancePVs;
            PVInfoForClusterRetrieval pvInfoForRetrieval = new PVInfoForClusterRetrieval(pvNames.get(i),
                    typeInfos.get(i), postProcessors.get(i), applianceForPVs.get(i));
            appliancePVs.add(pvInfoForRetrieval);
            applianceToPVs.put(applianceForPVs.get(i).getRetrievalURL(), appliancePVs);
        }
    }

    List<List<Future<EventStream>>> listOfEventStreamFuturesLists = new ArrayList<List<Future<EventStream>>>();
    Set<String> retrievalURLs = applianceToPVs.keySet();
    if (retrievalURLs.size() > 0) {
        // Get the list of PVs for each external appliance and retrieve them from there.
        for (String retrievalURL : retrievalURLs) {
            // Get the list of PVs for this appliance
            ArrayList<PVInfoForClusterRetrieval> pvInfos = applianceToPVs.get(retrievalURL);
            try {
                List<List<Future<EventStream>>> resultFromForeignAppliances = retrieveEventStreamFromForeignAppliance(
                        req, resp, pvInfos, requestTimes, useChunkedEncoding,
                        retrievalURL + "/../data/getDataForPVs.raw", start, end);
                listOfEventStreamFuturesLists.addAll(resultFromForeignAppliances);
            } catch (Exception ex) {
                logger.error("Failed to retrieve " + StringUtils.join(pvNames, ", ") + " from " + retrievalURL
                        + ".");
                return;
            }
        }
    }

    pmansProfiler.mark("After Appliance Info");

    // Setting post processor for PVs, taking into account whether there is a field in the PV name
    List<String> pvNamesFromRequests = new ArrayList<String>(pvNames.size());
    for (int i = 0; i < pvNames.size(); i++) {
        String pvName = pvNames.get(i);
        pvNamesFromRequests.add(pvName);
        PVTypeInfo typeInfo = typeInfos.get(i);
        postProcessorUserArg = postProcessorUserArgs.get(i);

        // If a field is specified in a PV name, it will create a post processor for that
        String fieldName = PVNames.getFieldName(pvName);
        if (fieldName != null && !fieldName.equals("") && !pvName.equals(typeInfo.getPvName())) {
            logger.debug("We reset the pvName " + pvName + " to one from the typeinfo " + typeInfo.getPvName()
                    + " as that determines the name of the stream. " + "Also using ExtraFieldsPostProcessor.");
            pvNames.set(i, typeInfo.getPvName());
            postProcessors.set(i, new ExtraFieldsPostProcessor(fieldName));
        }

        try {
            // Postprocessors get their mandatory arguments from the request.
            // If user does not pass in the expected request, throw an exception.
            postProcessors.get(i).initialize(postProcessorUserArg, pvName);
        } catch (Exception ex) {
            String msg = "Postprocessor threw an exception during initialization for " + pvName;
            logger.error(msg, ex);
            resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
            resp.sendError(HttpServletResponse.SC_NOT_FOUND, msg);
            return;
        }
    }

    /*
     * MergeDedupConsumer is what writes PB data in its respective format to the HTTP response.
     * The response, after the MergeDedupConsumer is created, contains the following:
     * 
     * 1) The content type for the response.
     * 2) Any additional headers for the particular MIME response.
     * 
     * Additionally, the MergeDedupConsumer instance holds a reference to the output stream
     * that is used to write to the HTTP response. It is stored under the name `os`.
     */
    MergeDedupConsumer mergeDedupCountingConsumer;
    try {
        mergeDedupCountingConsumer = createMergeDedupConsumer(resp, extension, useChunkedEncoding);
    } catch (ServletException se) {
        String msg = "Exception when retrieving data " + "-->" + se.toString();
        logger.error(msg, se);
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, msg);
        return;
    }

    /* 
     * BasicContext contains the PV name and the expected return type. Used to access PB files.
     * RetrievalExecutorResult contains a thread service class and the time spans. Presumably,
     * the thread service is what retrieves the data, and the BasicContext is the context in
     * which it works.
     */
    List<HashMap<String, String>> engineMetadatas = new ArrayList<HashMap<String, String>>();
    try {
        List<BasicContext> retrievalContexts = new ArrayList<BasicContext>(pvNames.size());
        List<RetrievalExecutorResult> executorResults = new ArrayList<RetrievalExecutorResult>(pvNames.size());
        for (int i = 0; i < pvNames.size(); i++) {
            if (fetchLatestMetadata) {
                // Make a call to the engine to fetch the latest metadata.
                engineMetadatas.add(fetchLatestMedataFromEngine(pvNames.get(i), applianceForPVs.get(i)));
            }
            retrievalContexts.add(new BasicContext(typeInfos.get(i).getDBRType(), pvNamesFromRequests.get(i)));
            executorResults.add(determineExecutorForPostProcessing(pvNames.get(i), typeInfos.get(i),
                    requestTimes, req, postProcessors.get(i)));
        }

        /*
         * There are as many Future objects in the eventStreamFutures List as there are periods over 
         * which to fetch data. Retrieval of data happen here in parallel.
         */
        List<LinkedList<Future<RetrievalResult>>> listOfRetrievalResultFuturesLists = new ArrayList<LinkedList<Future<RetrievalResult>>>();
        for (int i = 0; i < pvNames.size(); i++) {
            listOfRetrievalResultFuturesLists.add(resolveAllDataSources(pvNames.get(i), typeInfos.get(i),
                    postProcessors.get(i), applianceForPVs.get(i), retrievalContexts.get(i),
                    executorResults.get(i), req, resp));
        }
        pmansProfiler.mark("After data source resolution");

        for (int i = 0; i < pvNames.size(); i++) {
            // Data is retrieved here
            List<Future<EventStream>> eventStreamFutures = getEventStreamFuturesFromRetrievalResults(
                    executorResults.get(i), listOfRetrievalResultFuturesLists.get(i));
            listOfEventStreamFuturesLists.add(eventStreamFutures);
        }

    } catch (Exception ex) {
        if (ex != null && ex.toString() != null && ex.toString().contains("ClientAbortException")) {
            // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
            logger.debug("Exception when retrieving data ", ex);
        } else {
            logger.error("Exception when retrieving data " + "-->" + ex.toString(), ex);
        }
    }

    long s1 = System.currentTimeMillis();
    String currentlyProcessingPV = null;

    /*
     * The following try bracket goes through each of the streams in the list of event stream futures.
     * 
     * It is intended that the process goes through one PV at a time.
     */
    try {
        for (int i = 0; i < pvNames.size(); i++) {
            List<Future<EventStream>> eventStreamFutures = listOfEventStreamFuturesLists.get(i);
            String pvName = pvNames.get(i);
            PVTypeInfo typeInfo = typeInfos.get(i);
            HashMap<String, String> engineMetadata = fetchLatestMetadata ? engineMetadatas.get(i) : null;
            PostProcessor postProcessor = postProcessors.get(i);

            logger.debug("Done with the RetrievalResults; moving onto the individual event stream "
                    + "from each source for " + StringUtils.join(pvNames, ", "));
            pmansProfiler.mark("After retrieval results");
            for (Future<EventStream> future : eventStreamFutures) {
                EventStreamDesc sourceDesc = null;

                // Gets the result of a data retrieval
                try (EventStream eventStream = future.get()) {
                    sourceDesc = null; // Reset it for each loop iteration.
                    sourceDesc = eventStream.getDescription();
                    if (sourceDesc == null) {
                        logger.warn("Skipping event stream without a desc for pv " + pvName);
                        continue;
                    }

                    logger.debug("Processing event stream for pv " + pvName + " from source "
                            + ((eventStream.getDescription() != null) ? eventStream.getDescription().getSource()
                                    : " unknown"));

                    try {
                        mergeTypeInfo(typeInfo, sourceDesc, engineMetadata);
                    } catch (MismatchedDBRTypeException mex) {
                        logger.error(mex.getMessage(), mex);
                        continue;
                    }

                    if (currentlyProcessingPV == null || !currentlyProcessingPV.equals(pvName)) {
                        logger.debug("Switching to new PV " + pvName + " In some mime responses we insert "
                                + "special headers at the beginning of the response. Calling the hook for "
                                + "that");
                        currentlyProcessingPV = pvName;
                        /*
                         * Goes through the PB data stream over a period of time. The relevant MIME response
                         * actually deal with the processing of the PV. `start` and `end` refer to the very
                         * beginning and very end of the time period being retrieved over, regardless of
                         * whether it is divided up or not.
                         */
                        mergeDedupCountingConsumer.processingPV(currentlyProcessingPV, start, end,
                                (eventStream != null) ? sourceDesc : null);
                    }

                    try {
                        // If the postProcessor does not have a consolidated event stream, we send each eventstream across as we encounter it.
                        // Else we send the consolidatedEventStream down below.
                        if (!(postProcessor instanceof PostProcessorWithConsolidatedEventStream)) {
                            /*
                             * The eventStream object contains all the data over the current period.
                             */
                            mergeDedupCountingConsumer.consumeEventStream(eventStream);
                            resp.flushBuffer();
                        }
                    } catch (Exception ex) {
                        if (ex != null && ex.toString() != null
                                && ex.toString().contains("ClientAbortException")) {
                            // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
                            logger.debug(
                                    "Exception when consuming and flushing data from " + sourceDesc.getSource(),
                                    ex);
                        } else {
                            logger.error("Exception when consuming and flushing data from "
                                    + sourceDesc.getSource() + "-->" + ex.toString(), ex);
                        }
                    }
                    pmansProfiler.mark("After event stream " + eventStream.getDescription().getSource());
                } catch (Exception ex) {
                    if (ex != null && ex.toString() != null && ex.toString().contains("ClientAbortException")) {
                        // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
                        logger.debug("Exception when consuming and flushing data from "
                                + (sourceDesc != null ? sourceDesc.getSource() : "N/A"), ex);
                    } else {
                        logger.error("Exception when consuming and flushing data from "
                                + (sourceDesc != null ? sourceDesc.getSource() : "N/A") + "-->" + ex.toString(),
                                ex);
                    }
                }
            }

            // TODO Go through data from other appliances here

            if (postProcessor instanceof PostProcessorWithConsolidatedEventStream) {
                try (EventStream eventStream = ((PostProcessorWithConsolidatedEventStream) postProcessor)
                        .getConsolidatedEventStream()) {
                    EventStreamDesc sourceDesc = eventStream.getDescription();
                    if (sourceDesc == null) {
                        logger.error("Skipping event stream without a desc for pv " + pvName
                                + " and post processor " + postProcessor.getExtension());
                    } else {
                        mergeDedupCountingConsumer.consumeEventStream(eventStream);
                        resp.flushBuffer();
                    }
                }
            }

            // If the postProcessor needs to send final data across, give it a chance now...
            if (postProcessor instanceof AfterAllStreams) {
                EventStream finalEventStream = ((AfterAllStreams) postProcessor).anyFinalData();
                if (finalEventStream != null) {
                    mergeDedupCountingConsumer.consumeEventStream(finalEventStream);
                    resp.flushBuffer();
                }
            }

            pmansProfiler.mark("After writing all eventstreams to response");
        }
    } catch (Exception ex) {
        if (ex != null && ex.toString() != null && ex.toString().contains("ClientAbortException")) {
            // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
            logger.debug("Exception when retrieving data ", ex);
        } else {
            logger.error("Exception when retrieving data " + "-->" + ex.toString(), ex);
        }
    }

    long s2 = System.currentTimeMillis();
    logger.info("For the complete request, found a total of " + mergeDedupCountingConsumer.totalEventsForAllPVs
            + " in " + (s2 - s1) + "(ms)" + " skipping " + mergeDedupCountingConsumer.skippedEventsForAllPVs
            + " events" + " deduping involved " + mergeDedupCountingConsumer.comparedEventsForAllPVs
            + " compares.");

    pmansProfiler.mark("After all closes and flushing all buffers");

    // Until we determine all the conditions under which to log this, we log sparingly..
    if (pmansProfiler.totalTimeMS() > 5000) {
        logger.error("Retrieval time for " + StringUtils.join(pvNames, ", ") + " from " + startTimeStr + " to "
                + endTimeStr + ": " + pmansProfiler.toString());
    }

    mergeDedupCountingConsumer.close();
}
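
One getLast() usage in the servlet above is the bounding of the overall retrieval window: since requestTimes is parsed in order, requestTimes.getFirst().getStartTime() and requestTimes.getLast().getEndTime() give the earliest start and the latest end. A self-contained sketch of that step (the TimeSpan class below is a hypothetical stand-in for the archiver's own):

import java.util.LinkedList;

public class OverallWindow {
    // Hypothetical stand-in for the archiver appliance's TimeSpan.
    static class TimeSpan {
        final long start, end;
        TimeSpan(long start, long end) { this.start = start; this.end = end; }
    }

    public static void main(String[] args) {
        LinkedList<TimeSpan> requestTimes = new LinkedList<TimeSpan>();
        requestTimes.add(new TimeSpan(100, 200));
        requestTimes.add(new TimeSpan(300, 400));
        requestTimes.add(new TimeSpan(500, 600));
        // The ranges are ordered, so the overall window runs from the
        // first span's start to the last span's end.
        long start = requestTimes.getFirst().start;
        long end = requestTimes.getLast().end;
        System.out.println("window: " + start + " - " + end); // window: 100 - 600
    }
}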

From source file:tokyo.northside.jrst.JRSTLexer.java

/**
 * read simple and complex tables
 * 
 * <pre>
 * +------------------------+------------+----------+----------+
 * | Header row, column 1   | Header 2   | Header 3 | Header 4 |
 * | (header rows optional) |            |          |          |
 * +========================+============+==========+==========+
 * | body row 1, column 1   | column 2   | column 3 | column 4 |
 * +------------------------+------------+----------+----------+
 * | body row 2             | Cells may span columns.          |
 * +------------------------+------------+---------------------+
 * </pre>
 * 
 * @return Element
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public Element peekTable() throws IOException {
    beginPeek();

    Element result = null;
    // in.skipBlankLines();
    String line = in.readLine();

    if (line != null) {
        Pattern pTableBegin = Pattern.compile("^\\s*(\\+-+)+\\+\\s*$");
        Matcher matcher = null;

        matcher = pTableBegin.matcher(line);
        if (matcher.matches()) { // complexe table
            result = DocumentHelper.createElement(TABLE);
            result.addAttribute(TABLE_HEADER, FALSE);
            int level = level(line);
            result.addAttribute(LEVEL, String.valueOf(level));
            line = line.trim();
            int tableWidth = line.length();
            result.addAttribute(TABLE_WIDTH, String.valueOf(tableWidth));

            Pattern pCellEnd = Pattern
                    .compile("^\\s{" + level + "}(\\+-+\\+|\\|(?:[^+]+))([^+]+(?:\\+|\\|\\s*$)|-+\\+)*\\s*"); // end of row
            Pattern pCell = Pattern.compile("^\\s{" + level + "}(\\|[^|]+)+\\|\\s*$"); // one row
            Pattern pHeader = Pattern.compile("^\\s{" + level + "}(\\+=+)+\\+\\s*$"); // end of header
            Pattern pEnd = Pattern.compile("^\\s{" + level + "}(\\+-+)+\\+\\s*$"); // end of table

            // used to know if | is cell separator or not
            String lastSeparationLine = line;
            String lastLine = line;

            Element row = DocumentHelper.createElement(ROW);
            String[] table = in.readUntilBlank();

            boolean done = false;
            for (String l : table) {
                done = false;
                l = l.trim();
                if (l.length() != tableWidth) {
                    // Error in the table; maybe throw an exception?
                    result = null;
                    break;
                }
                matcher = pEnd.matcher(l);
                if (!done && matcher.matches()) {
                    // normal end of row; the row can be added directly
                    lastSeparationLine = l;
                    for (Element cell : (List<Element>) row.elements()) {
                        cell.addAttribute(CELL_END, TRUE);
                    }
                    row.addAttribute(ROW_END_HEADER, FALSE);
                    result.add(row);
                    row = DocumentHelper.createElement(ROW);
                    done = true;
                }
                matcher = pHeader.matcher(l);
                if (!done && matcher.matches()) {
                    // end of the header; the row can be added directly
                    lastSeparationLine = l;
                    for (Element cell : (List<Element>) row.elements()) {
                        cell.addAttribute(CELL_END, TRUE);
                    }
                    row.addAttribute(ROW_END_HEADER, TRUE);
                    result.add(row);
                    result.addAttribute(TABLE_HEADER, TRUE);
                    row = DocumentHelper.createElement(ROW);
                    done = true;
                }
                matcher = pCell.matcher(l);
                if (!done && matcher.matches()) {
                    // debug
                    row.addAttribute("debug", "pCell");
                    // collect the cell texts
                    int start = -1;
                    String content = "";
                    matcher = Pattern.compile("([^|]+)\\|").matcher(l);
                    for (int cellNumber = 0; matcher.find(); cellNumber++) {
                        int tmpstart = matcher.start(1);
                        int end = matcher.end(1);
                        String tmpcontent = matcher.group(1);
                        // there must be a | or a + above the +,
                        // and there must be a + on lastSeparationLine;
                        // otherwise it means the cell itself contained a |
                        if ((lastLine.charAt(end) == '|' || lastLine.charAt(end) == '+')
                                && lastSeparationLine.charAt(end) == '+') {
                            if ("".equals(content)) {
                                content = tmpcontent;
                            } else {
                                content += tmpcontent;
                            }
                            if (start == -1) {
                                start = tmpstart;
                            }
                            Element cell = null;
                            if (row.nodeCount() <= cellNumber) {
                                cell = row.addElement(CELL);
                                cell.addAttribute(CELL_END, FALSE);
                            } else {
                                cell = (Element) row.node(cellNumber);
                            }
                            cell.addAttribute(CELL_INDEX_START, String.valueOf(start));
                            cell.addAttribute(CELL_INDEX_END, String.valueOf(end));
                            cell.setText(cell.getText() + content + "\n");
                            start = end + 1; // +1 to pass + or | at end
                            // of cell
                            content = "";
                        } else {
                            // start = tmpstart;
                            if (start == -1) {
                                start = tmpstart;
                            }
                            content += tmpcontent + "|";
                            cellNumber--;
                        }
                    }
                    done = true;
                }
                matcher = pCellEnd.matcher(l);
                if (!done && matcher.matches()) {
                    // debug
                    row.addAttribute("debug", "pCellEnd");
                    // end of a row that cannot be added directly:
                    // for each continuing cell the previous value must be copied over

                    // but start by closing all the cells anyway
                    for (Element cell : (List<Element>) row.elements()) {
                        cell.addAttribute(CELL_END, TRUE);
                    }

                    StringBuffer tmp = new StringBuffer(l);
                    int start = -1;
                    String content = "";
                    matcher = Pattern.compile("([^+|]+|-+)([+|])").matcher(l);
                    for (int cellNumber = 0; matcher.find(); cellNumber++) {
                        int tmpstart = matcher.start(1);
                        int end = matcher.end(1);
                        String tmpcontent = matcher.group(1);
                        String ender = matcher.group(2);
                        if (!tmpcontent.matches("-+")) {
                            // there must be a | above the + or the |;
                            // otherwise it means the cell itself contained a +
                            if (lastLine.charAt(end) == '|') {
                                if (start == -1) {
                                    start = tmpstart;
                                }
                                // -1 and +1 to take the + or | at begin and
                                // end
                                String old = lastSeparationLine.substring(start - 1, end + 1);
                                tmp.replace(start - 1, end + 1, old);
                                if ("".equals(content)) {
                                    content = tmpcontent;
                                }
                                Element cell = null;
                                if (row.nodeCount() <= cellNumber) {
                                    cell = row.addElement(CELL);
                                } else {
                                    cell = (Element) row.node(cellNumber);

                                }
                                cell.setText(cell.getText() + content + "\n");
                                // something was added to the cell,
                                // so this is not its end
                                cell.addAttribute(CELL_END, FALSE);
                                cell.addAttribute(CELL_INDEX_START, String.valueOf(start));
                                cell.addAttribute(CELL_INDEX_END, String.valueOf(end));
                                start = end + 1; // +1 to pass + or | at
                                // end of cell
                                content = "";
                            } else {
                                // start = tmpstart;
                                content += tmpcontent + ender;
                            }
                        }
                    }
                    lastSeparationLine = tmp.toString();
                    row.addAttribute(ROW_END_HEADER, FALSE);
                    result.add(row);
                    row = DocumentHelper.createElement(ROW);
                    done = true;
                }
                if (!done) {
                    log.warn("Bad table format line " + in.getLineNumber());
                }
                lastLine = l;
            }

            //
            // line += "\n" + joinBlock(table, "\n", false);
            //
            // result.addText(line);
        } else if (line.matches("^\\s*(=+ +)+=+\\s*$")) {
            // The table data may extend beyond the column borders
            /*
             * =====  =====  ======
             *    Inputs     Output
             * ------------  ------
             *   A      B    A or B
             * =====  =====  ======
             * False  False  Second column of row 1.
             * True   False  Second column of row 2.
             *
             * True   2      - Second column of row 3.
             *
             *                - Second item in bullet list (row 3, column 2).
             * ============   ======
             */

            result = DocumentHelper.createElement(TABLE);
            line = line.trim();
            Pattern pBordersEquals = Pattern.compile("^\\s*(=+ +)+=+\\s*$"); // separator made of =
            Pattern pBordersTiret = Pattern.compile("^\\s*(-+ +)+-+\\s*$"); // separator made of -
            Pattern pBorders = Pattern.compile("^\\s*([=-]+ +)+[=-]+\\s*$"); // = or -
            String[] table = in.readUntilBlank(); // read the whole table

            int tableWidth = line.length();
            int nbSeparations = 0;
            for (String l : table) {
                if (l.length() > tableWidth) {
                    tableWidth = l.length(); // track the maximum line length
                }
                matcher = pBordersEquals.matcher(l);
                if (matcher.matches()) {
                    nbSeparations++;
                }

            }
            // Header present if the table has 3 '=' separator lines in total
            // (the first one was already consumed, so 2 remain here)
            result.addAttribute(TABLE_HEADER, "" + (nbSeparations == 2));
            int level = level(line);
            result.addAttribute(LEVEL, String.valueOf(level));
            result.addAttribute(TABLE_WIDTH, String.valueOf(tableWidth + 1));
            Element row = DocumentHelper.createElement(ROW);
            // Determine the column positions
            List<Integer> columns = new LinkedList<Integer>();
            matcher = Pattern.compile("=+\\s+").matcher(line);
            for (int cellNumber = 0; matcher.find(); cellNumber++) {
                columns.add(matcher.end());
            }
            columns.add(tableWidth);

            // Process the table
            /*
             * =====  =====  ======
             *    Inputs     Output
             * ------------  ------
             *   A      B    A or B
             * =====  =====  ======
             * False  False  Second column of row 1.
             * True   False  Second column of row 2.
             *
             * True   2      - Second column of row 3.
             *
             *                - Second item in bullet list (row 3, column 2).
             * ============   ======
             *
             * becomes the equivalent:
             *
             * =====  =====  ======
             *    Inputs     Output
             * ------------  ------
             *   A      B    A or B
             * =====  =====  ======
             * False  False  Second column of row 1.
             * -----  -----  ------
             * True   False  Second column of row 2.
             * -----  -----  ------
             * True   2      - Second column of row 3.
             *                - Second item in bullet list (row 3, column 2).
             * ============   ======
             */
            String lineRef = line.replace('=', '-');
            Matcher matcher2;
            List<String> tableTmp = new LinkedList<String>();

            for (int i = 0; i < table.length - 1; i++) {
                tableTmp.add(table[i]);
                if (!table[i].equals("")) {
                    if (!table[i + 1].substring(0, columns.get(0)).matches("\\s*")) {
                        matcher = pBorders.matcher(table[i]);
                        matcher2 = pBorders.matcher(table[i + 1]);
                        if (!matcher.matches() && !matcher2.matches() && !table[i + 1].equals("")) {
                            tableTmp.add(lineRef);
                        }
                    }
                }
            }
            tableTmp.add(table[table.length - 1]);
            table = new String[tableTmp.size()];
            for (int i = 0; i < tableTmp.size(); i++) {
                table[i] = tableTmp.get(i);
            }

            boolean done = false;
            LinkedList<String> lastLines = new LinkedList<String>();
            int separation = 1;
            for (String l : table) {
                if (l != null) {
                    done = false;
                    matcher = pBordersTiret.matcher(l);
                    matcher2 = pBordersEquals.matcher(l);
                    if (matcher.matches() || matcher2.matches()) { // intermediate separator
                        while (!lastLines.isEmpty()) {
                            matcher = Pattern.compile("[-=]+\\s*").matcher(l);
                            String tmpLine = lastLines.getLast();
                            lastLines.removeLast();
                            int cellNumber;
                            for (cellNumber = 0; matcher.find(); cellNumber++) {
                                Element cell = null;
                                if (row.nodeCount() <= cellNumber) {
                                    cell = row.addElement(CELL);
                                } else {
                                    cell = (Element) row.node(cellNumber);
                                }
                                if (matcher.start() < tmpLine.length()) {
                                    if (columns.size() - 1 == cellNumber) {
                                        cell.setText(
                                                tmpLine.substring(matcher.start(), tmpLine.length()) + "\n");
                                    } else {
                                        if (matcher.end() < tmpLine.length()) {
                                            cell.setText(
                                                    tmpLine.substring(matcher.start(), matcher.end()) + "\n");
                                        } else {
                                            cell.setText(tmpLine.substring(matcher.start(), tmpLine.length())
                                                    + "\n");
                                        }
                                    }
                                }

                                if (lastLines.size() == 0) {
                                    row.addAttribute("debug", "pCell");
                                    cell.addAttribute(CELL_END, TRUE);
                                } else {
                                    row.addAttribute("debug", "pCellEnd");
                                    cell.addAttribute(CELL_END, FALSE);
                                }
                                cell.addAttribute(CELL_INDEX_START, String.valueOf(matcher.start() + 1));
                                if (line.length() == matcher.end()) {
                                    cell.addAttribute(CELL_INDEX_END,
                                            String.valueOf(columns.get(columns.size() - 1)));
                                } else {
                                    cell.addAttribute(CELL_INDEX_END, String.valueOf(matcher.end()));
                                }
                            }

                            if (matcher2.matches()) {
                                separation++;
                                row.addAttribute(ROW_END_HEADER, "" + (separation == 2));
                            } else {
                                row.addAttribute(ROW_END_HEADER, FALSE);
                            }

                            result.add(row);
                            row = DocumentHelper.createElement(ROW);
                            done = true;
                        }
                    }
                    if (!done && l.matches("^\\s*(.+ +)+.+\\s*$")) {
                        // Data
                        lastLines.addFirst(l); // data lines are queued
                        // in the lastLines FIFO
                        done = true;
                    }
                    if (!done) {
                        log.warn("Bad table format line " + in.getLineNumber());
                    }
                }
            }
        }
    }
    endPeek();

    return result;
}
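
In peekTable() the lastLines list works as a FIFO queue: data lines enter at the head with addFirst(), and once a separator line arrives they leave from the tail, getLast() peeking and removeLast() popping, so rows are replayed in the order they were read. A minimal sketch of that queue discipline (the row strings are hypothetical):

import java.util.LinkedList;

public class FifoViaLinkedList {
    public static void main(String[] args) {
        LinkedList<String> lastLines = new LinkedList<String>();
        // Enqueue at the head...
        lastLines.addFirst("row 1");
        lastLines.addFirst("row 2");
        lastLines.addFirst("row 3");
        // ...dequeue at the tail: getLast() peeks, removeLast() pops.
        while (!lastLines.isEmpty()) {
            String line = lastLines.getLast();
            lastLines.removeLast();
            System.out.println(line); // prints row 1, row 2, row 3 in order
        }
    }
}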

From source file:org.hyperic.hq.measurement.server.session.AvailabilityManagerImpl.java

private PageList<HighLowMetricValue> getPageList(List<AvailabilityDataRLE> availInfo, long begin, long end,
        long interval, boolean prependUnknowns) {
    PageList<HighLowMetricValue> rtn = new PageList<HighLowMetricValue>();
    for (Iterator<AvailabilityDataRLE> it = availInfo.iterator(); it.hasNext();) {
        AvailabilityDataRLE rle = it.next();
        long availStartime = rle.getStartime();
        long availEndtime = rle.getEndtime();
        // skip measurements that end before the first time slot
        if (availEndtime < begin) {
            continue;
        }
        LinkedList<AvailabilityDataRLE> queue = new LinkedList<AvailabilityDataRLE>();
        queue.add(rle);
        int i = 0;
        for (long curr = begin; curr <= end; curr += interval) {
            long next = curr + interval;
            next = (next > end) ? end : next;
            long endtime = ((AvailabilityDataRLE) queue.getFirst()).getEndtime();
            //while next time slot is after measurement time range
            while (next > endtime) {
                // it should not be the case that there are no more
                // avails in the array, but we need to handle it
                if (it.hasNext()) {
                    AvailabilityDataRLE tmp = it.next();
                    queue.addFirst(tmp);
                    endtime = tmp.getEndtime();
                } else {
                    endtime = availEndtime;
                    int measId = rle.getMeasurement().getId().intValue();
                    String msg = "Measurement, " + measId + ", for interval " + begin + " - " + end
                            + " did not return a value for range " + curr + " - " + (curr + interval);
                    log.warn(msg);
                }
            }
            endtime = availEndtime;
            while (curr > endtime) {
                queue.removeLast();
                // this should not happen unless the above !it.hasNext()
                // else condition is true
                if (queue.size() == 0) {
                    rle = new AvailabilityDataRLE(rle.getMeasurement(), rle.getEndtime(), next, AVAIL_UNKNOWN);
                    queue.addLast(rle);
                }
                rle = queue.getLast();
                availStartime = rle.getStartime();
                availEndtime = rle.getEndtime();
                endtime = availEndtime;
            }
            HighLowMetricValue val;
            if (curr >= availStartime) {
                val = getMetricValue(queue, curr);
            } else if (prependUnknowns) {
                val = new HighLowMetricValue(AVAIL_UNKNOWN, curr);
                val.incrementCount();
            } else {
                i++;
                continue;
            }
            if (rtn.size() <= i) {
                rtn.add(round(val));
            } else {
                updateMetricValue(val, (HighLowMetricValue) rtn.get(i));
            }
            i++;
        }
    }
    if (rtn.size() == 0) {
        rtn.addAll(getDefaultHistoricalAvail(end));
    }
    return rtn;
}
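
getPageList above keeps run-length availability entries in a LinkedList precisely because it needs both ends: getFirst() drives the interval loop, while getLast() exposes the oldest entry still in range after each removeLast(). A minimal sketch of that double-ended peeking, with entries reduced to {starttime, endtime} pairs (illustrative only):

import java.util.LinkedList;

public class DoubleEndedPeek {
    public static void main(String[] args) {
        // each element is {starttime, endtime}; newer ranges are pushed at the head
        LinkedList<long[]> queue = new LinkedList<long[]>();
        queue.add(new long[] { 0L, 100L });
        queue.addFirst(new long[] { 100L, 200L });
        queue.addFirst(new long[] { 200L, 300L });

        // head of the deque: the most recently added range
        System.out.println("newest ends at " + queue.getFirst()[1]); // 300

        // drop ranges from the tail that ended before the current time slot
        long curr = 150L;
        while (queue.getLast()[1] <= curr) {
            queue.removeLast();
        }
        // tail of the deque: the oldest range still in play
        System.out.println("current starts at " + queue.getLast()[0]); // 100
    }
}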

From source file:org.seasr.meandre.components.analytics.socialnetworking.AbstractLinkCreationComponent.java

@Override
public void executeCallBack(ComponentContext cc) throws Exception {
    Strings inMetaTuple = (Strings) cc.getDataComponentFromInput(IN_META_TUPLE);
    SimpleTuplePeer tuplePeer = new SimpleTuplePeer(inMetaTuple);
    console.fine("Input meta tuple: " + tuplePeer.toString());

    StringsArray inTuples = (StringsArray) cc.getDataComponentFromInput(IN_TUPLES);
    Strings[] tuples = BasicDataTypesTools.stringsArrayToJavaArray(inTuples);

    int SENTENCE_ID_IDX = tuplePeer.getIndexForFieldName(OpenNLPNamedEntity.SENTENCE_ID_FIELD);
    int TYPE_IDX = tuplePeer.getIndexForFieldName(OpenNLPNamedEntity.TYPE_FIELD);
    int TEXT_IDX = tuplePeer.getIndexForFieldName(OpenNLPNamedEntity.TEXT_FIELD);

    // Linked list of sentences keyed by sentence id - the HashSet is the set of entities in that sentence
    LinkedList<KeyValuePair<Integer, HashSet<Entity>>> _sentencesWindow = new LinkedList<KeyValuePair<Integer, HashSet<Entity>>>();

    // Note: The algorithm that marks entities as adjacent when they fall within the specified sentence distance
    //       relies on a sliding window of sentences that are within the 'adjacency' range. As new sentences are
    //       considered, the window moves to the right and old sentences that are now too far fall out of scope.
    //       (A minimal sketch of this window idiom follows the method.)

    SimpleTuple tuple = tuplePeer.createTuple();
    for (Strings t : tuples) {
        tuple.setValues(t);

        Integer sentenceId = Integer.parseInt(tuple.getValue(SENTENCE_ID_IDX));
        String tupleType = tuple.getValue(TYPE_IDX);
        String tupleValue = tuple.getValue(TEXT_IDX);

        // If the entity is of the type we're interested in
        if (_entityTypes.contains(tupleType)) {

            if (_normalizeEntities) {
                // Normalize whitespaces
                StringBuilder sb = new StringBuilder();
                Matcher nonWhitespaceMatcher = REGEXP_NONWHITESPACE.matcher(tupleValue);
                while (nonWhitespaceMatcher.find())
                    sb.append(" ").append(nonWhitespaceMatcher.group(1));

                if (sb.length() > 0)
                    tupleValue = sb.substring(1);
                else
                    continue;

                // Normalize people's names
                if (tupleType.toLowerCase().equals("person")) {
                    sb = new StringBuilder();
                    Matcher personMatcher = REGEXP_PERSON.matcher(tupleValue);
                    while (personMatcher.find())
                        sb.append(" ").append(personMatcher.group(1));

                    if (sb.length() > 0)
                        tupleValue = sb.substring(1);
                    else
                        continue;

                    // ignore names with 1 character
                    if (tupleValue.length() == 1)
                        continue;
                }

                tupleValue = WordUtils.capitalizeFully(tupleValue);
            }

            // ... create an object for it
            Entity entity = new Entity(tupleType, tupleValue);

            // Check if we already recorded this entity before
            Entity oldEntity = _entities.get(entity);
            if (oldEntity == null)
                // If not, record it
                _entities.put(entity, entity);
            else
                // Otherwise retrieve the entity we used before
                entity = oldEntity;

            HashSet<Entity> sentenceEntities;

            // Remove all sentences (together with any entities they contained) from the set
            // of sentences that are too far from the current sentence of this entity
            while (_sentencesWindow.size() > 0 && sentenceId - _sentencesWindow.peek().getKey() > _offset)
                _sentencesWindow.remove();

            if (_sentencesWindow.size() > 0) {
                // If this sentence is different from the last sentence in the window
                if (!_sentencesWindow.getLast().getKey().equals(sentenceId)) {
                    // Create an entry for it and add it at the end of the window
                    sentenceEntities = new HashSet<Entity>();
                    _sentencesWindow
                            .addLast(new KeyValuePair<Integer, HashSet<Entity>>(sentenceId, sentenceEntities));
                } else
                    sentenceEntities = _sentencesWindow.getLast().getValue();
            } else {
                // If there are no sentences in the window, create an entry for this sentence and add it
                sentenceEntities = new HashSet<Entity>();
                _sentencesWindow
                        .addLast(new KeyValuePair<Integer, HashSet<Entity>>(sentenceId, sentenceEntities));
            }

            // Iterate through all the sentences in the window
            for (KeyValuePair<Integer, HashSet<Entity>> kvp : _sentencesWindow)
                // ... and all the entities in each sentence
                for (Entity e : kvp.getValue()) {
                    // ignore self-references
                    if (e.equals(entity))
                        continue;

                    // ... and mark the new entity as being adjacent to all the entities in the window
                    e.addOutwardLink(entity);
                    entity.addInwardLink(e);
                }

            // Add the new entity to the window
            sentenceEntities.add(entity);
        }
    }

    if (!_isStreaming)
        generateAndPushOutputInternal();
}
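
A minimal sketch of the sliding-window idiom described in the note above: peek() reads the oldest sentence at the head, getLast() the newest at the tail, and sentences that drift out of range are evicted from the front. All names here are illustrative, not part of the SEASR API:

import java.util.AbstractMap.SimpleEntry;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;

public class SentenceWindowDemo {
    public static void main(String[] args) {
        int offset = 2; // max sentence distance considered "adjacent"
        LinkedList<Map.Entry<Integer, HashSet<String>>> window =
                new LinkedList<Map.Entry<Integer, HashSet<String>>>();

        for (int sentenceId : new int[] { 1, 2, 5 }) {
            // evict sentences that are now too far from the current one
            while (!window.isEmpty() && sentenceId - window.peek().getKey() > offset) {
                window.remove();
            }
            // reuse the tail entry if it is the same sentence, otherwise append
            if (window.isEmpty() || !window.getLast().getKey().equals(sentenceId)) {
                window.addLast(new SimpleEntry<Integer, HashSet<String>>(
                        sentenceId, new HashSet<String>()));
            }
            window.getLast().getValue().add("entity-in-sentence-" + sentenceId);
            System.out.println("window spans sentences "
                    + window.peek().getKey() + ".." + window.getLast().getKey());
        }
    }
}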

From source file:de.innovationgate.wgpublisher.webtml.utils.TMLContext.java

public static TMLContext getThreadMainContext() {

    LinkedList<TMLContext> contexts = _threadMainContexts.get();
    if (contexts != null && contexts.size() > 0) {
        return contexts.getLast();
    } else {
        return null;
    }

}
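
The same guard-then-getLast pattern, reduced to a self-contained thread-local stack. Note that getLast() throws NoSuchElementException on an empty list, hence the emptiness check before the call. The names here are hypothetical, not the WGA API:

import java.util.LinkedList;

public class ThreadContextDemo {
    private static final ThreadLocal<LinkedList<String>> CONTEXTS =
            new ThreadLocal<LinkedList<String>>() {
                protected LinkedList<String> initialValue() {
                    return new LinkedList<String>();
                }
            };

    public static void pushContext(String ctx) {
        CONTEXTS.get().addLast(ctx);
    }

    public static String getCurrentContext() {
        LinkedList<String> contexts = CONTEXTS.get();
        // guard before getLast(): it throws on an empty list
        return contexts.isEmpty() ? null : contexts.getLast();
    }

    public static void main(String[] args) {
        System.out.println(getCurrentContext()); // null
        pushContext("outer");
        pushContext("inner");
        System.out.println(getCurrentContext()); // inner
    }
}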

From source file:com.edgenius.wiki.render.filter.MacroFilter.java

/**
 * Scans the input for macro markup, pairs start/end macros, and groups them
 * into processors.
 */
private void checkGroup(final int initPos, CharSequence input, final LinkedList<GroupProcessor> stack,
        List<GroupProcessor> processors) {
    final List<Region> pairRegions = new ArrayList<Region>();

    singleMacroProvider.replaceByTokenVisitor(input, new TokenVisitor<Matcher>() {
        public void handleMatch(StringBuffer buffer, Matcher result) {
            String macroName = result.group(1);
            if (macroName != null && !macroName.startsWith("$")) {
                Macro macro = macroMgr.getMacro(macroName);

                if (macro != null) {
                    //IMPORTANT: do not check Macro.isPair() here; put the macro into pairRegions for later processing.
                    //The processing order must stay consistent with the physical sequence in the input text;
                    //for example, in {table}{cell}...{rowdiv}, rowdiv is single and must come after cell
                    int start = result.start(0);
                    int end = result.end(0);
                    Region pair = new Region(start, end);
                    //no parameters: mark as unknown; otherwise it must be a start macro
                    if (StringUtils.isBlank(result.group(2))) {
                        pair.setKey(MACRO_REGION_KEY_UNKNOWN);
                    } else {
                        pair.setKey(MACRO_REGION_KEY_START);
                    }

                    //temporarily remember the macro name
                    pair.setContent(macroName);
                    //add to the list
                    pairRegions.add(pair);
                }
            }
        }
    });

    int size = pairRegions.size();
    if (size > 0) {
        StringBuffer inputBuf = new StringBuffer(input);
        for (int idx = 0; idx < size; idx++) {
            Region reg = pairRegions.get(idx);
            Macro macro = macroMgr.getMacro(reg.getContent());
            if (macro.isPaired()) {
                int deep = 0;
                Region pair = null;
                //looking for pairs...
                for (int chIdx = idx + 1; chIdx < size; chIdx++) {
                    Region next = pairRegions.get(chIdx);
                    if (StringUtils.equalsIgnoreCase(reg.getContent(), next.getContent())) {
                        //if the start is unknown (no attributes), the end must be unknown too
                        if (MACRO_REGION_KEY_UNKNOWN.equals(reg.getKey())
                                && MACRO_REGION_KEY_UNKNOWN.equals(next.getKey())) {
                            //matched
                            pair = next;
                            //skip all internal nodes - they are handled by the embedded recursion
                            idx = chIdx;
                            break;
                        }

                        if (MACRO_REGION_KEY_START.equals(reg.getKey())
                                && MACRO_REGION_KEY_UNKNOWN.equals(next.getKey())) {
                            if (deep == 0) {
                                //matched;
                                pair = next;
                                //skip all internal nodes - they are handled by the embedded recursion
                                idx = chIdx;
                                break;
                            } else {
                                //an inner macro with the same name matched; decrease the nesting depth
                                deep--;
                            }
                        }
                        if (MACRO_REGION_KEY_START.equals(next.getKey())) {
                            //another start macro found (the fourth scenario); increase the nesting depth
                            deep++;
                        }
                    }
                }
                //successfully found a matching pair
                if (pair != null) {
                    int start = initPos + reg.getStart();
                    int end = initPos + pair.getEnd();
                    int contentStart = initPos + reg.getEnd();
                    int contentEnd = initPos + pair.getStart();

                    GroupProcessor currProcessor = stack.size() == 0 ? null : stack.getLast();
                    if (currProcessor != null) {
                        currProcessor.adoptChild(macro, start, end);
                    }

                    if (macro.isProcessEmbedded() && (end > start)) {
                        if (macro.hasChildren() != null) {
                            stack.add(((GroupProcessorMacro) macro).newGroupProcessor(macro, start, end));
                        }
                        checkGroup(contentStart,
                                inputBuf.subSequence(contentStart - initPos, contentEnd - initPos), stack,
                                processors);
                        if (macro.hasChildren() != null) {
                            //pop the current one, means it is a completed GroupProcessor
                            processors.add(stack.removeLast());
                        }
                    }
                }
            } else {
                //single macro - directly detect if it is child
                GroupProcessor currProcessor = stack.size() == 0 ? null : stack.getLast();
                if (currProcessor != null) {
                    currProcessor.adoptChild(macro, initPos + reg.getStart(), initPos + reg.getEnd());
                }
            }
        }
    }
}
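
checkGroup above treats its LinkedList as a stack: add() pushes a group processor, getLast() peeks at the one currently in charge, and removeLast() pops it once complete. The same idiom in isolation (names are illustrative):

import java.util.LinkedList;

public class StackPeekDemo {
    public static void main(String[] args) {
        LinkedList<String> stack = new LinkedList<String>();

        stack.add("table-processor"); // push
        stack.add("cell-processor");  // push a nested processor

        // peek at the processor currently in charge, guarding the empty case
        String current = stack.isEmpty() ? null : stack.getLast();
        System.out.println("current = " + current);         // cell-processor

        String completed = stack.removeLast();              // pop when done
        System.out.println("completed = " + completed);     // cell-processor
        System.out.println("back to = " + stack.getLast()); // table-processor
    }
}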

From source file:org.alfresco.solr.query.Solr4QueryParser.java

@SuppressWarnings("unchecked")
protected Query getFieldQueryImpl(String field, String queryText, AnalysisMode analysisMode,
        LuceneFunction luceneFunction) throws ParseException, IOException {
    // make sure the field exists or return a dummy query so we have no error ....ACE-3231
    SchemaField schemaField = schema.getFieldOrNull(field);
    boolean isNumeric = false;
    if (schemaField == null) {
        return new TermQuery(new Term("_dummy_", "_miss_"));
    } else {
        isNumeric = (schemaField.getType().getNumericType() != null);
    }

    // Use the analyzer to get all the tokens, and then build a TermQuery,
    // PhraseQuery, or nothing, based on the term count

    // TODO: Untokenised columns with functions require special handling

    if (luceneFunction != LuceneFunction.FIELD) {
        throw new UnsupportedOperationException(
                "Field queries are not supported on lucene functions (UPPER, LOWER, etc)");
    }

    // if the incoming string already has a language identifier, we strip it off and add it back on again

    String localePrefix = "";

    String toTokenise = queryText;

    if (queryText.startsWith("{")) {
        int position = queryText.indexOf("}");
        if (position > 0) {
            String language = queryText.substring(0, position + 1);
            Locale locale = new Locale(queryText.substring(1, position));
            String token = queryText.substring(position + 1);
            boolean found = false;
            for (Locale current : Locale.getAvailableLocales()) {
                if (current.toString().equalsIgnoreCase(locale.toString())) {
                    found = true;
                    break;
                }
            }
            if (found) {
                localePrefix = language;
                toTokenise = token;
            } else {
                //toTokenise = token;
            }
        }
    }

    String testText = toTokenise;
    boolean requiresMLTokenDuplication = false;
    String localeString = null;
    if (isPropertyField(field) && (localePrefix.length() == 0)) {
        if ((queryText.length() > 0) && (queryText.charAt(0) == '\u0000')) {
            int position = queryText.indexOf("\u0000", 1);
            testText = queryText.substring(position + 1);
            requiresMLTokenDuplication = true;
            localeString = queryText.substring(1, position);

        }
    }

    // find the positions of any escaped * and ? and ignore them

    Set<Integer> wildcardPoistions = getWildcardPositions(testText);

    TokenStream source = null;
    ArrayList<org.apache.lucene.analysis.Token> list = new ArrayList<org.apache.lucene.analysis.Token>();
    boolean severalTokensAtSamePosition = false;
    org.apache.lucene.analysis.Token nextToken;
    int positionCount = 0;

    try {
        org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();

        source = getAnalyzer().tokenStream(field, new StringReader(toTokenise));
        source.reset();
        while (source.incrementToken()) {
            CharTermAttribute cta = source.getAttribute(CharTermAttribute.class);
            OffsetAttribute offsetAtt = source.getAttribute(OffsetAttribute.class);
            TypeAttribute typeAtt = null;
            if (source.hasAttribute(TypeAttribute.class)) {
                typeAtt = source.getAttribute(TypeAttribute.class);
            }
            PositionIncrementAttribute posIncAtt = null;
            if (source.hasAttribute(PositionIncrementAttribute.class)) {
                posIncAtt = source.getAttribute(PositionIncrementAttribute.class);
            }
            nextToken = new Token(cta.buffer(), 0, cta.length(), offsetAtt.startOffset(),
                    offsetAtt.endOffset());
            if (typeAtt != null) {
                nextToken.setType(typeAtt.type());
            }
            if (posIncAtt != null) {
                nextToken.setPositionIncrement(posIncAtt.getPositionIncrement());
            }

            list.add(nextToken);
            if (nextToken.getPositionIncrement() != 0)
                positionCount += nextToken.getPositionIncrement();
            else
                severalTokensAtSamePosition = true;
        }
    } catch (SolrException e) {
        // MNT-15336
        // Text against a numeric field should fail silently rather than tell you it is not possible.
        if (isNumeric && e.getMessage() != null && e.getMessage().startsWith("Invalid Number:")) {
            // Generate a query that does not match any document - rather than nothing
            return createNoMatchQuery();
        } else {
            throw e;
        }
    } finally {
        try {
            if (source != null) {
                source.close();
            }
        } catch (IOException e) {
            // ignore
        }
    }

    // add any alpha numeric wildcards that have been missed
    // Fixes most stop word and wild card issues

    for (int index = 0; index < testText.length(); index++) {
        char current = testText.charAt(index);
        if (((current == '*') || (current == '?')) && wildcardPoistions.contains(index)) {
            StringBuilder pre = new StringBuilder(10);
            if (index == 0) {
                // "*" and "?" at the start

                boolean found = false;
                for (int j = 0; j < list.size(); j++) {
                    org.apache.lucene.analysis.Token test = list.get(j);
                    if ((test.startOffset() <= 0) && (0 < test.endOffset())) {
                        found = true;
                        break;
                    }
                }
                if (!found && (list.size() == 0)) {
                    // Add new token followed by * not given by the tokeniser
                    org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token("", 0, 0);
                    newToken.setType("ALPHANUM");
                    if (requiresMLTokenDuplication) {
                        Locale locale = I18NUtil.parseLocale(localeString);
                        MLTokenDuplicator duplicator = new MLTokenDuplicator(locale,
                                MLAnalysisMode.EXACT_LANGUAGE);
                        Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken);
                        if (it != null) {
                            int count = 0;
                            while (it.hasNext()) {
                                list.add(it.next());
                                count++;
                                if (count > 1) {
                                    severalTokensAtSamePosition = true;
                                }
                            }
                        }
                    }
                    // content
                    else {
                        list.add(newToken);
                    }
                }
            } else if (index > 0) {
                // Add * and ? back into any tokens from which it has been removed

                boolean tokenFound = false;
                for (int j = 0; j < list.size(); j++) {
                    org.apache.lucene.analysis.Token test = list.get(j);
                    if ((test.startOffset() <= index) && (index < test.endOffset())) {
                        if (requiresMLTokenDuplication) {
                            String termText = test.toString();
                            int position = termText.indexOf("}");
                            String language = termText.substring(0, position + 1);
                            String token = termText.substring(position + 1);
                            if (index >= test.startOffset() + token.length()) {
                                test.setEmpty();
                                test.append(language + token + current);
                            }
                        } else {
                            if (index >= test.startOffset() + test.length()) {
                                test.setEmpty();
                                test.append(test.toString() + current);
                            }
                        }
                        tokenFound = true;
                        break;
                    }
                }

                if (!tokenFound) {
                    for (int i = index - 1; i >= 0; i--) {
                        char c = testText.charAt(i);
                        if (Character.isLetterOrDigit(c)) {
                            boolean found = false;
                            for (int j = 0; j < list.size(); j++) {
                                org.apache.lucene.analysis.Token test = list.get(j);
                                if ((test.startOffset() <= i) && (i < test.endOffset())) {
                                    found = true;
                                    break;
                                }
                            }
                            if (found) {
                                break;
                            } else {
                                pre.insert(0, c);
                            }
                        } else {
                            break;
                        }
                    }
                    if (pre.length() > 0) {
                        // Add new token followed by * not given by the tokeniser
                        org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token(
                                pre.toString(), index - pre.length(), index);
                        newToken.setType("ALPHANUM");
                        if (requiresMLTokenDuplication) {
                            Locale locale = I18NUtil.parseLocale(localeString);
                            MLTokenDuplicator duplicator = new MLTokenDuplicator(locale,
                                    MLAnalysisMode.EXACT_LANGUAGE);
                            Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken);
                            if (it != null) {
                                int count = 0;
                                while (it.hasNext()) {
                                    list.add(it.next());
                                    count++;
                                    if (count > 1) {
                                        severalTokensAtSamePosition = true;
                                    }
                                }
                            }
                        }
                        // content
                        else {
                            list.add(newToken);
                        }
                    }
                }
            }

            StringBuilder post = new StringBuilder(10);
            if (index > 0) {
                for (int i = index + 1; i < testText.length(); i++) {
                    char c = testText.charAt(i);
                    if (Character.isLetterOrDigit(c)) {
                        boolean found = false;
                        for (int j = 0; j < list.size(); j++) {
                            org.apache.lucene.analysis.Token test = list.get(j);
                            if ((test.startOffset() <= i) && (i < test.endOffset())) {
                                found = true;
                                break;
                            }
                        }
                        if (found) {
                            break;
                        } else {
                            post.append(c);
                        }
                    } else {
                        break;
                    }
                }
                if (post.length() > 0) {
                    // Add new token followed by * not given by the tokeniser
                    org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token(
                            post.toString(), index + 1, index + 1 + post.length());
                    newToken.setType("ALPHANUM");
                    if (requiresMLTokenDuplication) {
                        Locale locale = I18NUtil.parseLocale(localeString);
                        MLTokenDuplicator duplicator = new MLTokenDuplicator(locale,
                                MLAnalysisMode.EXACT_LANGUAGE);
                        Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken);
                        if (it != null) {
                            int count = 0;
                            while (it.hasNext()) {
                                list.add(it.next());
                                count++;
                                if (count > 1) {
                                    severalTokensAtSamePosition = true;
                                }
                            }
                        }
                    }
                    // content
                    else {
                        list.add(newToken);
                    }
                }
            }

        }
    }

    // Put in real position increments as we treat them correctly
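    // A zero increment means "same position as the previous token" (synonyms and
    // the like); this pass copies the last real increment onto those tokens, so a
    // sequence of increments such as [1, 0, 0, 2, 0] becomes [1, 1, 1, 2, 2].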

    int currentIncrement = -1;
    for (org.apache.lucene.analysis.Token c : list) {
        if (currentIncrement == -1) {
            currentIncrement = c.getPositionIncrement();
        } else if (c.getPositionIncrement() > 0) {
            currentIncrement = c.getPositionIncrement();
        } else {
            c.setPositionIncrement(currentIncrement);
        }
    }

    // Remove small bits already covered in larger fragments 
    list = getNonContained(list);

    Collections.sort(list, new Comparator<org.apache.lucene.analysis.Token>() {

        public int compare(Token o1, Token o2) {
            int dif = o1.startOffset() - o2.startOffset();
            return dif;

        }
    });

    // Combined * and ? based strings - should redo the tokeniser

    // Build tokens by position

    LinkedList<LinkedList<org.apache.lucene.analysis.Token>> tokensByPosition = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>();
    LinkedList<org.apache.lucene.analysis.Token> currentList = null;
    int lastStart = 0;
    for (org.apache.lucene.analysis.Token c : list) {
        if (c.startOffset() == lastStart) {
            if (currentList == null) {
                currentList = new LinkedList<org.apache.lucene.analysis.Token>();
                tokensByPosition.add(currentList);
            }
            currentList.add(c);
        } else {
            currentList = new LinkedList<org.apache.lucene.analysis.Token>();
            tokensByPosition.add(currentList);
            currentList.add(c);
        }
        lastStart = c.startOffset();
    }

    // Build all the token sequences and see which ones get strung together
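    // Each pass extends every existing sequence with the tokens found at the next
    // start position; synonym and gram tokens may replace or merge with the tail
    // of a sequence, which is why getLast() is used throughout to inspect it.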

    OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>> allTokenSequencesSet = new OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>>();
    for (LinkedList<org.apache.lucene.analysis.Token> tokensAtPosition : tokensByPosition) {
        OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>> positionalSynonymSequencesSet = new OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>>();

        OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>> newAllTokenSequencesSet = new OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>>();

        FOR_FIRST_TOKEN_AT_POSITION_ONLY: for (org.apache.lucene.analysis.Token t : tokensAtPosition) {
            org.apache.lucene.analysis.Token replace = new org.apache.lucene.analysis.Token(t, t.startOffset(),
                    t.endOffset());
            replace.setType(t.type());
            replace.setPositionIncrement(t.getPositionIncrement());

            boolean tokenFoundSequence = false;
            for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : allTokenSequencesSet) {
                LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>();
                newEntry.addAll(tokenSequence);
                if ((newEntry.getLast().endOffset() == replace.endOffset())
                        && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                    if ((newEntry.getLast().startOffset() == replace.startOffset())
                            && newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)) {
                        positionalSynonymSequencesSet.add(tokenSequence);
                        newEntry.add(replace);
                        tokenFoundSequence = true;
                    } else if (newEntry.getLast().type().equals(CommonGramsFilter.GRAM_TYPE)) {
                        if (newEntry.toString().endsWith(replace.toString())) {
                            // already in the gram
                            positionalSynonymSequencesSet.add(tokenSequence);
                            tokenFoundSequence = true;
                        } else {
                            // need to replace the synonym in the current gram
                            tokenFoundSequence = true;
                            StringBuffer old = new StringBuffer(newEntry.getLast().toString());
                            old.replace(replace.startOffset() - newEntry.getLast().startOffset(),
                                    replace.endOffset() - newEntry.getLast().startOffset(), replace.toString());
                            Token newToken = new org.apache.lucene.analysis.Token(old.toString(),
                                    newEntry.getLast().startOffset(), newEntry.getLast().endOffset());
                            newEntry.removeLast();
                            newEntry.add(newToken);
                        }
                    }
                } else if ((newEntry.getLast().startOffset() < replace.startOffset())
                        && (newEntry.getLast().endOffset() < replace.endOffset())) {
                    if (newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)
                            && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                        positionalSynonymSequencesSet.add(tokenSequence);
                    }
                    newEntry.add(replace);
                    tokenFoundSequence = true;
                }
                newAllTokenSequencesSet.add(newEntry);
            }
            if (false == tokenFoundSequence) {
                for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : newAllTokenSequencesSet) {
                    LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>();
                    newEntry.addAll(tokenSequence);
                    if ((newEntry.getLast().endOffset() == replace.endOffset())
                            && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                        if ((newEntry.getLast().startOffset() == replace.startOffset())
                                && newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)) {
                            positionalSynonymSequencesSet.add(tokenSequence);
                            newEntry.add(replace);
                            tokenFoundSequence = true;
                        } else if (newEntry.getLast().type().equals(CommonGramsFilter.GRAM_TYPE)) {
                            if (newEntry.toString().endsWith(replace.toString())) {
                                // already in the gram
                                positionalSynonymSequencesSet.add(tokenSequence);
                                tokenFoundSequence = true;
                            } else {
                                // need to replace the synonym in the current gram
                                tokenFoundSequence = true;
                                StringBuffer old = new StringBuffer(newEntry.getLast().toString());
                                old.replace(replace.startOffset() - newEntry.getLast().startOffset(),
                                        replace.endOffset() - newEntry.getLast().startOffset(),
                                        replace.toString());
                                Token newToken = new org.apache.lucene.analysis.Token(old.toString(),
                                        newEntry.getLast().startOffset(), newEntry.getLast().endOffset());
                                newEntry.removeLast();
                                newEntry.add(newToken);
                                positionalSynonymSequencesSet.add(newEntry);
                            }
                        }
                    } else if ((newEntry.getLast().startOffset() < replace.startOffset())
                            && (newEntry.getLast().endOffset() < replace.endOffset())) {
                        if (newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)
                                && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                            positionalSynonymSequencesSet.add(tokenSequence);
                            newEntry.add(replace);
                            tokenFoundSequence = true;
                        }
                    }
                }
            }
            if (false == tokenFoundSequence) {
                LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>();
                newEntry.add(replace);
                newAllTokenSequencesSet.add(newEntry);
            }
            // Limit the max number of permutations we consider
            if (newAllTokenSequencesSet.size() > 64) {
                break FOR_FIRST_TOKEN_AT_POSITION_ONLY;
            }
        }
        allTokenSequencesSet = newAllTokenSequencesSet;
        allTokenSequencesSet.addAll(positionalSynonymSequencesSet);

    }

    LinkedList<LinkedList<org.apache.lucene.analysis.Token>> allTokenSequences = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>(
            allTokenSequencesSet);

    // rebuild each unique token sequence, reattaching the wildcard characters (* and ?) that the analyzer stripped

    LinkedList<LinkedList<org.apache.lucene.analysis.Token>> fixedTokenSequences = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>();
    for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : allTokenSequences) {
        LinkedList<org.apache.lucene.analysis.Token> fixedTokenSequence = new LinkedList<org.apache.lucene.analysis.Token>();
        fixedTokenSequences.add(fixedTokenSequence);
        org.apache.lucene.analysis.Token replace = null;
        for (org.apache.lucene.analysis.Token c : tokenSequence) {
            if (replace == null) {
                StringBuilder prefix = new StringBuilder();
                for (int i = c.startOffset() - 1; i >= 0; i--) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        prefix.insert(0, test);
                    } else {
                        break;
                    }
                }
                String pre = prefix.toString();
                if (requiresMLTokenDuplication) {
                    String termText = c.toString();
                    int position = termText.indexOf("}");
                    String language = termText.substring(0, position + 1);
                    String token = termText.substring(position + 1);
                    replace = new org.apache.lucene.analysis.Token(language + pre + token,
                            c.startOffset() - pre.length(), c.endOffset());
                    replace.setType(c.type());
                    replace.setPositionIncrement(c.getPositionIncrement());
                } else {
                    String termText = c.toString();
                    replace = new org.apache.lucene.analysis.Token(pre + termText,
                            c.startOffset() - pre.length(), c.endOffset());
                    replace.setType(c.type());
                    replace.setPositionIncrement(c.getPositionIncrement());
                }
            } else {
                StringBuilder prefix = new StringBuilder();
                StringBuilder postfix = new StringBuilder();
                StringBuilder builder = prefix;
                for (int i = c.startOffset() - 1; i >= replace.endOffset(); i--) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        builder.insert(0, test);
                    } else {
                        builder = postfix;
                        postfix.setLength(0);
                    }
                }
                String pre = prefix.toString();
                String post = postfix.toString();

                // Does it bridge?
                if ((pre.length() > 0) && (replace.endOffset() + pre.length()) == c.startOffset()) {
                    String termText = c.toString();
                    if (requiresMLTokenDuplication) {
                        int position = termText.indexOf("}");
                        @SuppressWarnings("unused")
                        String language = termText.substring(0, position + 1);
                        String token = termText.substring(position + 1);
                        int oldPositionIncrement = replace.getPositionIncrement();
                        String replaceTermText = replace.toString();
                        replace = new org.apache.lucene.analysis.Token(replaceTermText + pre + token,
                                replace.startOffset(), c.endOffset());
                        replace.setType(replace.type());
                        replace.setPositionIncrement(oldPositionIncrement);
                    } else {
                        int oldPositionIncrement = replace.getPositionIncrement();
                        String replaceTermText = replace.toString();
                        replace = new org.apache.lucene.analysis.Token(replaceTermText + pre + termText,
                                replace.startOffset(), c.endOffset());
                        replace.setType(replace.type());
                        replace.setPositionIncrement(oldPositionIncrement);
                    }
                } else {
                    String termText = c.toString();
                    if (requiresMLTokenDuplication) {
                        int position = termText.indexOf("}");
                        String language = termText.substring(0, position + 1);
                        String token = termText.substring(position + 1);
                        String replaceTermText = replace.toString();
                        org.apache.lucene.analysis.Token last = new org.apache.lucene.analysis.Token(
                                replaceTermText + post, replace.startOffset(),
                                replace.endOffset() + post.length());
                        last.setType(replace.type());
                        last.setPositionIncrement(replace.getPositionIncrement());
                        fixedTokenSequence.add(last);
                        replace = new org.apache.lucene.analysis.Token(language + pre + token,
                                c.startOffset() - pre.length(), c.endOffset());
                        replace.setType(c.type());
                        replace.setPositionIncrement(c.getPositionIncrement());
                    } else {
                        String replaceTermText = replace.toString();
                        org.apache.lucene.analysis.Token last = new org.apache.lucene.analysis.Token(
                                replaceTermText + post, replace.startOffset(),
                                replace.endOffset() + post.length());
                        last.setType(replace.type());
                        last.setPositionIncrement(replace.getPositionIncrement());
                        fixedTokenSequence.add(last);
                        replace = new org.apache.lucene.analysis.Token(pre + termText,
                                c.startOffset() - pre.length(), c.endOffset());
                        replace.setType(c.type());
                        replace.setPositionIncrement(c.getPositionIncrement());
                    }
                }
            }
        }
        // finish last
        if (replace != null) {
            StringBuilder postfix = new StringBuilder();
            if ((replace.endOffset() >= 0) && (replace.endOffset() < testText.length())) {
                for (int i = replace.endOffset(); i < testText.length(); i++) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        postfix.append(test);
                    } else {
                        break;
                    }
                }
            }
            String post = postfix.toString();
            int oldPositionIncrement = replace.getPositionIncrement();
            String replaceTermText = replace.toString();
            replace = new org.apache.lucene.analysis.Token(replaceTermText + post, replace.startOffset(),
                    replace.endOffset() + post.length());
            replace.setType(replace.type());
            replace.setPositionIncrement(oldPositionIncrement);
            fixedTokenSequence.add(replace);
        }
    }

    // rebuild fixed list

    ArrayList<org.apache.lucene.analysis.Token> fixed = new ArrayList<org.apache.lucene.analysis.Token>();
    for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) {
        for (org.apache.lucene.analysis.Token token : tokenSequence) {
            fixed.add(token);
        }
    }

    // reorder by start position and increment

    Collections.sort(fixed, new Comparator<org.apache.lucene.analysis.Token>() {

        public int compare(Token o1, Token o2) {
            int dif = o1.startOffset() - o2.startOffset();
            if (dif != 0) {
                return dif;
            } else {
                return o1.getPositionIncrement() - o2.getPositionIncrement();
            }
        }
    });

    // make sure we remove any tokens we have duplicated

    @SuppressWarnings("rawtypes")
    OrderedHashSet unique = new OrderedHashSet();
    unique.addAll(fixed);
    fixed = new ArrayList<org.apache.lucene.analysis.Token>(unique);

    list = fixed;

    // add any missing locales back to the tokens

    if (localePrefix.length() > 0) {
        for (int j = 0; j < list.size(); j++) {
            org.apache.lucene.analysis.Token currentToken = list.get(j);
            String termText = currentToken.toString();
            currentToken.setEmpty();
            currentToken.append(localePrefix + termText);
        }
    }

    SchemaField sf = schema.getField(field);
    TokenizerChain tokenizerChain = (sf.getType().getQueryAnalyzer() instanceof TokenizerChain)
            ? ((TokenizerChain) sf.getType().getQueryAnalyzer())
            : null;
    boolean isShingled = false;
    if (tokenizerChain != null) {
        for (TokenFilterFactory factory : tokenizerChain.getTokenFilterFactories()) {
            if (factory instanceof ShingleFilterFactory) {
                isShingled = true;
                break;
            }
        }
    }
    AlfrescoAnalyzerWrapper analyzerWrapper = (sf.getType()
            .getQueryAnalyzer() instanceof AlfrescoAnalyzerWrapper)
                    ? ((AlfrescoAnalyzerWrapper) sf.getType().getQueryAnalyzer())
                    : null;
    if (analyzerWrapper != null) {
        // assume if there are no term positions it is shingled ....
        isShingled = true;
    }

    boolean forceConjunction = rerankPhase == RerankPhase.QUERY_PHASE;

    if (list.size() == 0)
        return null;
    else if (list.size() == 1) {
        nextToken = list.get(0);
        String termText = nextToken.toString();
        if (!isNumeric && (termText.contains("*") || termText.contains("?"))) {
            return newWildcardQuery(new Term(field, termText));
        } else {
            return newTermQuery(new Term(field, termText));
        }
    } else {
        if (severalTokensAtSamePosition) {
            if (positionCount == 1) {
                // no phrase query:
                BooleanQuery q = newBooleanQuery(true);
                for (int i = 0; i < list.size(); i++) {
                    Query currentQuery;
                    nextToken = list.get(i);
                    String termText = nextToken.toString();
                    if (termText.contains("*") || termText.contains("?")) {
                        currentQuery = newWildcardQuery(new Term(field, termText));
                    } else {
                        currentQuery = newTermQuery(new Term(field, termText));
                    }
                    q.add(currentQuery, BooleanClause.Occur.SHOULD);
                }
                return q;
            } else if (forceConjunction) {
                BooleanQuery or = new BooleanQuery();

                for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) {
                    BooleanQuery and = new BooleanQuery();
                    for (int i = 0; i < tokenSequence.size(); i++) {
                        nextToken = (org.apache.lucene.analysis.Token) tokenSequence.get(i);
                        String termText = nextToken.toString();

                        Term term = new Term(field, termText);
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            and.add(wildQuery, Occur.MUST);
                        } else {
                            TermQuery termQuery = new TermQuery(term);
                            and.add(termQuery, Occur.MUST);
                        }
                    }
                    if (and.clauses().size() > 0) {
                        or.add(and, Occur.SHOULD);
                    }
                }
                return or;
            }
            // shingle
            else if (sf.omitPositions() && isShingled) {

                ArrayList<org.apache.lucene.analysis.Token> nonContained = getNonContained(list);
                Query currentQuery;

                BooleanQuery weakPhrase = new BooleanQuery();
                for (org.apache.lucene.analysis.Token shingleToken : nonContained) {
                    String termText = shingleToken.toString();
                    Term term = new Term(field, termText);

                    if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                        currentQuery = new org.apache.lucene.search.WildcardQuery(term);
                    } else {
                        currentQuery = new TermQuery(term);
                    }
                    weakPhrase.add(currentQuery, Occur.MUST);
                }

                return weakPhrase;

            }
            // Consider if we can use a multi-phrase query (e.g. for synonym use rather than WordDelimiterFilterFactory)
            else if (canUseMultiPhraseQuery(fixedTokenSequences)) {
                // phrase query:
                MultiPhraseQuery mpq = newMultiPhraseQuery();
                mpq.setSlop(internalSlop);
                ArrayList<Term> multiTerms = new ArrayList<Term>();
                int position = 0;
                for (int i = 0; i < list.size(); i++) {
                    nextToken = list.get(i);
                    String termText = nextToken.toString();

                    Term term = new Term(field, termText);
                    if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                        throw new IllegalStateException("Wildcards are not allowed in multi phrase anymore");
                    } else {
                        multiTerms.add(term);
                    }

                    if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
                        if (getEnablePositionIncrements()) {
                            mpq.add(multiTerms.toArray(new Term[0]), position);
                        } else {
                            mpq.add(multiTerms.toArray(new Term[0]));
                        }
                        checkTermCount(field, queryText, mpq);
                        multiTerms.clear();
                    }
                    position += nextToken.getPositionIncrement();

                }
                if (getEnablePositionIncrements()) {
                    if (multiTerms.size() > 0) {
                        mpq.add(multiTerms.toArray(new Term[0]), position);
                    }
                    //                        else
                    //                        {
                    //                            mpq.add(new Term[] { new Term(field, "\u0000") }, position);
                    //                        }
                } else {
                    if (multiTerms.size() > 0) {
                        mpq.add(multiTerms.toArray(new Term[0]));
                    }
                    //                        else
                    //                        {
                    //                            mpq.add(new Term[] { new Term(field, "\u0000") });
                    //                        }
                }
                checkTermCount(field, queryText, mpq);
                return mpq;

            }
            // Word delimiter factories and other odd filters generate complex token patterns.
            // Smartly skip token sequences with small tokens that generate too many wildcards
            // and fall back to the larger pattern:
            // e.g. Site1* will not do (S ite 1*) or (Site 1*) if 1* matches too much; (S ite1*) and (Site1*) will still be OK.
            // If we skip all of them (for just 1* in the input) this is still an issue.
            else {

                return generateSpanOrQuery(field, fixedTokenSequences);

            }
        } else {
            if (forceConjunction) {
                BooleanQuery or = new BooleanQuery();

                for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) {
                    BooleanQuery and = new BooleanQuery();
                    for (int i = 0; i < tokenSequence.size(); i++) {
                        nextToken = (org.apache.lucene.analysis.Token) tokenSequence.get(i);
                        String termText = nextToken.toString();

                        Term term = new Term(field, termText);
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            and.add(wildQuery, Occur.MUST);
                        } else {
                            TermQuery termQuery = new TermQuery(term);
                            and.add(termQuery, Occur.MUST);
                        }
                    }
                    if (and.clauses().size() > 0) {
                        or.add(and, Occur.SHOULD);
                    }
                }
                return or;
            } else {
                SpanQuery spanQuery = null;
                SpanOrQuery atSamePosition = new SpanOrQuery();
                int gap = 0;
                for (int i = 0; i < list.size(); i++) {
                    nextToken = list.get(i);
                    String termText = nextToken.toString();
                    Term term = new Term(field, termText);
                    if (getEnablePositionIncrements()) {
                        SpanQuery nextSpanQuery;
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            SpanMultiTermQueryWrapper wrapper = new SpanMultiTermQueryWrapper<>(wildQuery);
                            wrapper.setRewriteMethod(
                                    new TopTermsSpanBooleanQueryRewrite(topTermSpanRewriteLimit));
                            nextSpanQuery = wrapper;
                        } else {
                            nextSpanQuery = new SpanTermQuery(term);
                        }
                        if (gap == 0) {
                            atSamePosition.addClause(nextSpanQuery);
                        } else {
                            if (atSamePosition.getClauses().length == 0) {
                                if (spanQuery == null) {
                                    spanQuery = nextSpanQuery;
                                } else {
                                    spanQuery = new SpanNearQuery(new SpanQuery[] { spanQuery, nextSpanQuery },
                                            (gap - 1) + internalSlop, internalSlop < 2);
                                }
                                atSamePosition = new SpanOrQuery();
                            } else if (atSamePosition.getClauses().length == 1) {
                                if (spanQuery == null) {
                                    spanQuery = atSamePosition.getClauses()[0];
                                } else {
                                    spanQuery = new SpanNearQuery(
                                            new SpanQuery[] { spanQuery, atSamePosition.getClauses()[0] },
                                            (gap - 1) + internalSlop, internalSlop < 2);
                                }
                                atSamePosition = new SpanOrQuery();
                                atSamePosition.addClause(nextSpanQuery);
                            } else {
                                if (spanQuery == null) {
                                    spanQuery = atSamePosition;
                                } else {
                                    spanQuery = new SpanNearQuery(new SpanQuery[] { spanQuery, atSamePosition },
                                            (gap - 1) + internalSlop, internalSlop < 2);
                                }
                                atSamePosition = new SpanOrQuery();
                                atSamePosition.addClause(nextSpanQuery);
                            }
                        }
                        gap = nextToken.getPositionIncrement();
                    } else {
                        SpanQuery nextSpanQuery;
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            SpanMultiTermQueryWrapper wrapper = new SpanMultiTermQueryWrapper<>(wildQuery);
                            wrapper.setRewriteMethod(
                                    new TopTermsSpanBooleanQueryRewrite(topTermSpanRewriteLimit));
                            nextSpanQuery = wrapper;
                        } else {
                            nextSpanQuery = new SpanTermQuery(term);
                        }
                        if (spanQuery == null) {
                            spanQuery = new SpanOrQuery();
                            ((SpanOrQuery) spanQuery).addClause(nextSpanQuery);
                        } else {
                            ((SpanOrQuery) spanQuery).addClause(nextSpanQuery);
                        }
                    }
                }
                if (atSamePosition.getClauses().length == 0) {
                    return spanQuery;
                } else if (atSamePosition.getClauses().length == 1) {
                    if (spanQuery == null) {
                        spanQuery = atSamePosition.getClauses()[0];
                    } else {
                        spanQuery = new SpanNearQuery(
                                new SpanQuery[] { spanQuery, atSamePosition.getClauses()[0] },
                                (gap - 1) + internalSlop, internalSlop < 2);
                    }
                    return spanQuery;
                } else {
                    if (spanQuery == null) {
                        spanQuery = atSamePosition;
                    } else {
                        spanQuery = new SpanNearQuery(new SpanQuery[] { spanQuery, atSamePosition },
                                (gap - 1) + internalSlop, internalSlop < 2);
                    }
                    return spanQuery;
                }
            }
        }
    }
}
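
The token-sequence assembly in getFieldQueryImpl repeatedly compares a candidate token against newEntry.getLast() before deciding whether to extend a sequence. Stripped of the Lucene machinery, the core tail inspection looks like this (tokens reduced to {startOffset, endOffset} pairs; illustrative only):

import java.util.LinkedList;

public class SequenceExtendDemo {
    public static void main(String[] args) {
        // each token is {startOffset, endOffset}
        LinkedList<int[]> sequence = new LinkedList<int[]>();
        sequence.add(new int[] { 0, 4 });

        int[][] candidates = { { 2, 6 }, { 5, 9 }, { 1, 3 } };
        for (int[] replace : candidates) {
            int[] last = sequence.getLast();
            // append only when the candidate starts and ends after the current tail
            if (last[0] < replace[0] && last[1] < replace[1]) {
                sequence.add(replace);
            }
        }
        for (int[] t : sequence) {
            System.out.println(t[0] + ".." + t[1]); // 0..4, 2..6, 5..9
        }
    }
}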