List of usage examples for java.util.LinkedList.removeLast()
public E removeLast()
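Before the source-file examples below, a minimal self-contained sketch of the method's contract: removeLast() removes and returns the last element of the LinkedList and throws NoSuchElementException when the list is empty (pollLast() returns null instead). The class and variable names here are illustrative only.

import java.util.LinkedList;
import java.util.NoSuchElementException;

public class RemoveLastDemo {
    public static void main(String[] args) {
        LinkedList<String> queue = new LinkedList<String>();
        queue.add("first");
        queue.add("second");
        queue.add("third");

        // removeLast() removes and returns the tail of the list
        String tail = queue.removeLast();                      // "third"
        System.out.println(tail + " -> remaining: " + queue);  // [first, second]

        queue.clear();
        try {
            queue.removeLast();                                // empty list
        } catch (NoSuchElementException e) {
            // removeLast() throws rather than returning null (contrast with pollLast())
            System.out.println("List was empty: " + e);
        }
    }
}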
From source file:tokyo.northside.jrst.JRSTLexer.java
/** * read table simple and complexe/*from www . j a v a2s . c o m*/ * * <pre> * +------------------------+------------+----------+----------+ * | Header row, column 1 | Header 2 | Header 3 | Header 4 | * | (header rows optional) | | | | * +========================+============+==========+==========+ * | body row 1, column 1 | column 2 | column 3 | column 4 | * +------------------------+------------+----------+----------+ * | body row 2 | Cells may span columns. | * +------------------------+------------+---------------------+ * </pre> * * @return Element * @throws IOException */ @SuppressWarnings("unchecked") public Element peekTable() throws IOException { beginPeek(); Element result = null; // in.skipBlankLines(); String line = in.readLine(); if (line != null) { Pattern pTableBegin = Pattern.compile("^\\s*(\\+-+)+\\+\\s*$"); Matcher matcher = null; matcher = pTableBegin.matcher(line); if (matcher.matches()) { // complexe table result = DocumentHelper.createElement(TABLE); result.addAttribute(TABLE_HEADER, FALSE); int level = level(line); result.addAttribute(LEVEL, String.valueOf(level)); line = line.trim(); int tableWidth = line.length(); result.addAttribute(TABLE_WIDTH, String.valueOf(tableWidth)); Pattern pCellEnd = Pattern .compile("^\\s{" + level + "}(\\+-+\\+|\\|(?:[^+]+))([^+]+(?:\\+|\\|\\s*$)|-+\\+)*\\s*"); // fin // de // ligne Pattern pCell = Pattern.compile("^\\s{" + level + "}(\\|[^|]+)+\\|\\s*$"); // une ligne Pattern pHeader = Pattern.compile("^\\s{" + level + "}(\\+=+)+\\+\\s*$"); // fin du header Pattern pEnd = Pattern.compile("^\\s{" + level + "}(\\+-+)+\\+\\s*$"); // fin de table // used to know if | is cell separator or not String lastSeparationLine = line; String lastLine = line; Element row = DocumentHelper.createElement(ROW); String[] table = in.readUntilBlank(); boolean done = false; for (String l : table) { done = false; l = l.trim(); if (l.length() != tableWidth) { // Erreur dans la table, peut-etre lever une exception ? 
result = null; break; } matcher = pEnd.matcher(l); if (!done && matcher.matches()) { // fin normale de ligne, on peut directement l'assigner lastSeparationLine = l; for (Element cell : (List<Element>) row.elements()) { cell.addAttribute(CELL_END, TRUE); } row.addAttribute(ROW_END_HEADER, FALSE); result.add(row); row = DocumentHelper.createElement(ROW); done = true; } matcher = pHeader.matcher(l); if (!done && matcher.matches()) { // fin de du header, on peut directement l'assigner lastSeparationLine = l; for (Element cell : (List<Element>) row.elements()) { cell.addAttribute(CELL_END, TRUE); } row.addAttribute(ROW_END_HEADER, TRUE); result.add(row); result.addAttribute(TABLE_HEADER, TRUE); row = DocumentHelper.createElement(ROW); done = true; } matcher = pCell.matcher(l); if (!done && matcher.matches()) { // debug row.addAttribute("debug", "pCell"); // recuperation des textes des cellules int start = -1; String content = ""; matcher = Pattern.compile("([^|]+)\\|").matcher(l); for (int cellNumber = 0; matcher.find(); cellNumber++) { int tmpstart = matcher.start(1); int end = matcher.end(1); String tmpcontent = matcher.group(1); // on a forcement un | ou un + au dessus du + // et forcement un + sur lastSeparationLine // sinon ca veut dire qu'il y avait un | dans la // cell if ((lastLine.charAt(end) == '|' || lastLine.charAt(end) == '+') && lastSeparationLine.charAt(end) == '+') { if ("".equals(content)) { content = tmpcontent; } else { content += tmpcontent; } if (start == -1) { start = tmpstart; } Element cell = null; if (row.nodeCount() <= cellNumber) { cell = row.addElement(CELL); cell.addAttribute(CELL_END, FALSE); } else { cell = (Element) row.node(cellNumber); } cell.addAttribute(CELL_INDEX_START, String.valueOf(start)); cell.addAttribute(CELL_INDEX_END, String.valueOf(end)); cell.setText(cell.getText() + content + "\n"); start = end + 1; // +1 to pass + or | at end // of cell content = ""; } else { // start = tmpstart; if (start == -1) { start = tmpstart; } content += tmpcontent + "|"; cellNumber--; } } done = true; } matcher = pCellEnd.matcher(l); if (!done && matcher.matches()) { // debug row.addAttribute("debug", "pCellEnd"); // fin d'une ligne, on ne peut pas l'assigner // directement // pour chaque continuation de cellule, il faut copier // l'ancienne valeur // mais on commence tout de meme par fermer tout les // cells for (Element cell : (List<Element>) row.elements()) { cell.addAttribute(CELL_END, TRUE); } StringBuffer tmp = new StringBuffer(l); int start = -1; String content = ""; matcher = Pattern.compile("([^+|]+|-+)([+|])").matcher(l); for (int cellNumber = 0; matcher.find(); cellNumber++) { int tmpstart = matcher.start(1); int end = matcher.end(1); String tmpcontent = matcher.group(1); String ender = matcher.group(2); if (!tmpcontent.matches("-+")) { // on a forcement un | au dessus du + ou du | // sinon ca veut dire qu'il y avait un + dans la // cell if (lastLine.charAt(end) == '|') { if (start == -1) { start = tmpstart; } // -1 and +1 to take the + or | at begin and // end String old = lastSeparationLine.substring(start - 1, end + 1); tmp.replace(start - 1, end + 1, old); if ("".equals(content)) { content = tmpcontent; } Element cell = null; if (row.nodeCount() <= cellNumber) { cell = row.addElement(CELL); } else { cell = (Element) row.node(cellNumber); } cell.setText(cell.getText() + content + "\n"); // on a ajouter des choses dans la cell, // donc // ce n'est pas la fin cell.addAttribute(CELL_END, FALSE); cell.addAttribute(CELL_INDEX_START, String.valueOf(start)); 
cell.addAttribute(CELL_INDEX_END, String.valueOf(end)); start = end + 1; // +1 to pass + or | at // end of cell content = ""; } else { // start = tmpstart; content += tmpcontent + ender; } } } lastSeparationLine = tmp.toString(); row.addAttribute(ROW_END_HEADER, FALSE); result.add(row); row = DocumentHelper.createElement(ROW); done = true; } if (!done) { log.warn("Bad table format line " + in.getLineNumber()); } lastLine = l; } // // line += "\n" + joinBlock(table, "\n", false); // // result.addText(line); } else if (line.matches("^\\s*(=+ +)+=+\\s*$")) { // Les donnees de la table peuvent depasser de celle-ci /* * ===== ===== ====== Inputs Output ------------ ------ A B A or * B ===== ===== ====== False False Second column of row 1. True * False Second column of row 2. * * True 2 - Second column of row 3. * * - Second item in bullet list (row 3, column 2). ============ * ====== */ result = DocumentHelper.createElement(TABLE); line = line.trim(); Pattern pBordersEquals = Pattern.compile("^\\s*(=+ +)+=+\\s*$"); // Separation // = Pattern pBordersTiret = Pattern.compile("^\\s*(-+ +)+-+\\s*$"); // Separation // - Pattern pBorders = Pattern.compile("^\\s*([=-]+ +)+[=-]+\\s*$"); // = // ou // - String[] table = in.readUntilBlank(); // Recuperation de la // table int tableWidth = line.length(); int nbSeparations = 0; for (String l : table) { if (l.length() > tableWidth) { tableWidth = l.length(); // Determination de la } // Determination de la // longueur max matcher = pBordersEquals.matcher(l); if (matcher.matches()) { nbSeparations++; } } // Header if the table contains 3 equals separations result.addAttribute(TABLE_HEADER, "" + (nbSeparations == 2)); int level = level(line); result.addAttribute(LEVEL, String.valueOf(level)); result.addAttribute(TABLE_WIDTH, String.valueOf(tableWidth + 1)); Element row = DocumentHelper.createElement(ROW); // Determination of the columns positions List<Integer> columns = new LinkedList<Integer>(); matcher = Pattern.compile("=+\\s+").matcher(line); for (int cellNumber = 0; matcher.find(); cellNumber++) { columns.add(matcher.end()); } columns.add(tableWidth); // Traitement du tbl /* * ===== ===== ====== Inputs Output ------------ ------ A B A or * B ===== ===== ====== False False Second column of row 1. True * False Second column of row 2. * * True 2 - Second column of row 3. * * - Second item in bullet list (row 3, column 2). ============ * ====== devient l'equivalent : ===== ===== ====== Inputs * Output ------------ ------ A B A or B ===== ===== ====== * False False Second column of row 1. ----- ----- ------ True * False Second column of row 2. ----- ----- ------ True 2 - * Second column of row 3. - Second item in bullet list (row 3, * column 2). 
============ ====== */ String lineRef = line.replace('=', '-'); Matcher matcher2; List<String> tableTmp = new LinkedList<String>(); for (int i = 0; i < table.length - 1; i++) { tableTmp.add(table[i]); if (!table[i].equals("")) { if (!table[i + 1].substring(0, columns.get(0)).matches("\\s*")) { matcher = pBorders.matcher(table[i]); matcher2 = pBorders.matcher(table[i + 1]); if (!matcher.matches() && !matcher2.matches() && !table[i + 1].equals("")) { tableTmp.add(lineRef); } } } } tableTmp.add(table[table.length - 1]); table = new String[tableTmp.size()]; for (int i = 0; i < tableTmp.size(); i++) { table[i] = tableTmp.get(i); } boolean done = false; LinkedList<String> lastLines = new LinkedList<String>(); int separation = 1; for (String l : table) { if (l != null) { done = false; matcher = pBordersTiret.matcher(l); matcher2 = pBordersEquals.matcher(l); if (matcher.matches() || matcher2.matches()) { // Intermediate // separation while (!lastLines.isEmpty()) { matcher = Pattern.compile("[-=]+\\s*").matcher(l); String tmpLine = lastLines.getLast(); lastLines.removeLast(); int cellNumber; for (cellNumber = 0; matcher.find(); cellNumber++) { Element cell = null; if (row.nodeCount() <= cellNumber) { cell = row.addElement(CELL); } else { cell = (Element) row.node(cellNumber); } if (matcher.start() < tmpLine.length()) { if (columns.size() - 1 == cellNumber) { cell.setText( tmpLine.substring(matcher.start(), tmpLine.length()) + "\n"); } else { if (matcher.end() < tmpLine.length()) { cell.setText( tmpLine.substring(matcher.start(), matcher.end()) + "\n"); } else { cell.setText(tmpLine.substring(matcher.start(), tmpLine.length()) + "\n"); } } } if (lastLines.size() == 0) { row.addAttribute("debug", "pCell"); cell.addAttribute(CELL_END, TRUE); } else { row.addAttribute("debug", "pCellEnd"); cell.addAttribute(CELL_END, FALSE); } cell.addAttribute(CELL_INDEX_START, String.valueOf(matcher.start() + 1)); if (line.length() == matcher.end()) { cell.addAttribute(CELL_INDEX_END, String.valueOf(columns.get(columns.size() - 1))); } else { cell.addAttribute(CELL_INDEX_END, String.valueOf(matcher.end())); } } if (matcher2.matches()) { separation++; row.addAttribute(ROW_END_HEADER, "" + (separation == 2)); } else { row.addAttribute(ROW_END_HEADER, FALSE); } result.add(row); row = DocumentHelper.createElement(ROW); done = true; } } if (!done && l.matches("^\\s*(.+ +)+.+\\s*$")) { // Data lastLines.addFirst(l); // Les donnees sont stoquee // dans une file d'attente // lastLines (FIFO) done = true; } if (!done) { log.warn("Bad table format line " + in.getLineNumber()); } } } } } endPeek(); return result; }
From source file:org.apereo.portal.url.UrlSyntaxProviderImpl.java
@Override public IPortalRequestInfo getPortalRequestInfo(HttpServletRequest request) { request = this.portalRequestUtils.getOriginalPortalRequest(request); final IPortalRequestInfo cachedPortalRequestInfo = (IPortalRequestInfo) request .getAttribute(PORTAL_REQUEST_INFO_ATTR); if (cachedPortalRequestInfo != null) { if (logger.isDebugEnabled()) { logger.debug("short-circuit: found portalRequestInfo within request attributes"); }//w ww . j a v a 2s .co m return cachedPortalRequestInfo; } synchronized (PortalWebUtils.getRequestAttributeMutex(request)) { // set a flag to say this request is currently being parsed final Boolean inProgressAttr = (Boolean) request.getAttribute(PORTAL_REQUEST_PARSING_IN_PROGRESS_ATTR); if (inProgressAttr != null && inProgressAttr) { if (logger.isDebugEnabled()) { logger.warn("Portal request info parsing already in progress, returning null"); } return null; } request.setAttribute(PORTAL_REQUEST_PARSING_IN_PROGRESS_ATTR, Boolean.TRUE); } try { // Clone the parameter map so data can be removed from it as it is parsed to help determine // what to do with non-namespaced parameters final Map<String, String[]> parameterMap = new ParameterMap(request.getParameterMap()); final String requestPath = this.urlPathHelper.getPathWithinApplication(request); if (LEGACY_URL_PATHS.contains(requestPath)) { return parseLegacyPortalUrl(request, parameterMap); } final IUrlNodeSyntaxHelper urlNodeSyntaxHelper = this.urlNodeSyntaxHelperRegistry .getCurrentUrlNodeSyntaxHelper(request); final PortalRequestInfoImpl portalRequestInfo = new PortalRequestInfoImpl(); IPortletWindowId targetedPortletWindowId = null; PortletRequestInfoImpl targetedPortletRequestInfo = null; final String[] requestPathParts = SLASH_PATTERN.split(requestPath); UrlState requestedUrlState = null; ParseStep parseStep = ParseStep.FOLDER; for (int pathPartIndex = 0; pathPartIndex < requestPathParts.length; pathPartIndex++) { String pathPart = requestPathParts[pathPartIndex]; logger.trace("In parseStep {} considering pathPart [{}].", parseStep, pathPart); if (StringUtils.isEmpty(pathPart)) { continue; } switch (parseStep) { case FOLDER: { parseStep = ParseStep.PORTLET; if (FOLDER_PATH_PREFIX.equals(pathPart)) { logger.trace("Skipping adding {} to the folders deque " + "because it is simply the folder path prefix.", pathPart); pathPartIndex++; final LinkedList<String> folders = new LinkedList<String>(); for (; pathPartIndex < requestPathParts.length; pathPartIndex++) { pathPart = requestPathParts[pathPartIndex]; if (PORTLET_PATH_PREFIX.equals(pathPart)) { logger.trace( "Found the portlet part of the path " + "demarked by portlet path prefix [{}]; " + "stepping back one path part to finish folder processing", pathPart); pathPartIndex--; break; } else { if (pathPart.endsWith(REQUEST_TYPE_SUFFIX)) { logger.trace("Found the end of the folder path with pathPart [{}];" + " stepping back one, checking for state, " + "and finishing folder parsing", pathPart); pathPartIndex--; pathPart = requestPathParts[pathPartIndex]; // If a state was added to the folder list remove it and step back one so // other code can handle it if (UrlState.valueOfIngoreCase(pathPart, null) != null) { logger.trace( "A state was added to the end of folder list {};" + " removing it.", folders); folders.removeLast(); pathPartIndex--; } break; } } logger.trace("Adding pathPart [{}] to folders.", pathPart); folders.add(pathPart); } logger.trace("Folders is [{}]", folders); if (folders.size() > 0) { final String targetedLayoutNodeId = urlNodeSyntaxHelper 
.getLayoutNodeForFolderNames(request, folders); portalRequestInfo.setTargetedLayoutNodeId(targetedLayoutNodeId); } break; } } case PORTLET: { parseStep = ParseStep.STATE; final String targetedLayoutNodeId = portalRequestInfo.getTargetedLayoutNodeId(); if (PORTLET_PATH_PREFIX.equals(pathPart)) { if (++pathPartIndex < requestPathParts.length) { pathPart = requestPathParts[pathPartIndex]; targetedPortletWindowId = urlNodeSyntaxHelper.getPortletForFolderName(request, targetedLayoutNodeId, pathPart); } break; } //See if a portlet was targeted by parameter final String[] targetedPortletIds = parameterMap.remove(PARAM_TARGET_PORTLET); if (targetedPortletIds != null && targetedPortletIds.length > 0) { final String targetedPortletString = targetedPortletIds[0]; targetedPortletWindowId = urlNodeSyntaxHelper.getPortletForFolderName(request, targetedLayoutNodeId, targetedPortletString); } } case STATE: { parseStep = ParseStep.TYPE; //States other than the default only make sense if a portlet is being targeted if (targetedPortletWindowId == null) { break; } requestedUrlState = UrlState.valueOfIngoreCase(pathPart, null); //Set the URL state if (requestedUrlState != null) { portalRequestInfo.setUrlState(requestedUrlState); //If the request is stateless if (statelessUrlStates.contains(requestedUrlState)) { final IPortletWindow statelessPortletWindow = this.portletWindowRegistry .getOrCreateStatelessPortletWindow(request, targetedPortletWindowId); targetedPortletWindowId = statelessPortletWindow.getPortletWindowId(); } //Create the portlet request info targetedPortletRequestInfo = portalRequestInfo .getPortletRequestInfo(targetedPortletWindowId); portalRequestInfo.setTargetedPortletWindowId(targetedPortletWindowId); //Set window state based on URL State first then look for the window state parameter switch (requestedUrlState) { case MAX: { targetedPortletRequestInfo.setWindowState(WindowState.MAXIMIZED); } break; case DETACHED: { targetedPortletRequestInfo.setWindowState(IPortletRenderer.DETACHED); } break; case EXCLUSIVE: { targetedPortletRequestInfo.setWindowState(IPortletRenderer.EXCLUSIVE); } break; } break; } } case TYPE: { parseStep = ParseStep.COMPLETE; if (pathPartIndex == requestPathParts.length - 1 && pathPart.endsWith(REQUEST_TYPE_SUFFIX) && pathPart.length() > REQUEST_TYPE_SUFFIX.length()) { final String urlTypePart = pathPart.substring(0, pathPart.length() - REQUEST_TYPE_SUFFIX.length()); final UrlType urlType; //Handle inline resourceIds, look for a . 
in the request type string and use the suffix as the urlType final int lastPeriod = urlTypePart.lastIndexOf('.'); if (lastPeriod >= 0 && lastPeriod < urlTypePart.length()) { final String urlTypePartSuffix = urlTypePart.substring(lastPeriod + 1); urlType = UrlType.valueOfIngoreCase(urlTypePartSuffix, null); if (urlType == UrlType.RESOURCE && targetedPortletRequestInfo != null) { final String resourceId = urlTypePart.substring(0, lastPeriod); targetedPortletRequestInfo.setResourceId(resourceId); } } else { urlType = UrlType.valueOfIngoreCase(urlTypePart, null); } if (urlType != null) { portalRequestInfo.setUrlType(urlType); break; } } } } } //If a targeted portlet window ID is found but no targeted portlet request info has been retrieved yet, set it up if (targetedPortletWindowId != null && targetedPortletRequestInfo == null) { targetedPortletRequestInfo = portalRequestInfo.getPortletRequestInfo(targetedPortletWindowId); portalRequestInfo.setTargetedPortletWindowId(targetedPortletWindowId); } //Get the set of portlet window ids that also have parameters on the url final String[] additionalPortletIdArray = parameterMap.remove(PARAM_ADDITIONAL_PORTLET); final Set<String> additionalPortletIds = Sets .newHashSet(additionalPortletIdArray != null ? additionalPortletIdArray : new String[0]); //Used if there is delegation to capture form-submit and other non-prefixed parameters //Map of parent id to delegate id final Map<IPortletWindowId, IPortletWindowId> delegateIdMappings = new LinkedHashMap<IPortletWindowId, IPortletWindowId>( 0); //Parse all remaining parameters from the request final Set<Entry<String, String[]>> parameterEntrySet = parameterMap.entrySet(); for (final Iterator<Entry<String, String[]>> parameterEntryItr = parameterEntrySet .iterator(); parameterEntryItr.hasNext();) { final Entry<String, String[]> parameterEntry = parameterEntryItr.next(); final String name = parameterEntry.getKey(); final List<String> values = Arrays.asList(parameterEntry.getValue()); /* NOTE: continues are being used to allow fall-through behavior like a switch statement would provide */ //Portal Parameters, just need to remove the prefix if (name.startsWith(PORTAL_PARAM_PREFIX)) { final Map<String, List<String>> portalParameters = portalRequestInfo.getPortalParameters(); portalParameters.put(this.safeSubstringAfter(PORTAL_PARAM_PREFIX, name), values); parameterEntryItr.remove(); continue; } //Generic portlet parameters, have to remove the prefix and see if there was a portlet windowId between the prefix and parameter name if (name.startsWith(PORTLET_PARAM_PREFIX)) { final Tuple<String, IPortletWindowId> portletParameterParts = this .parsePortletParameterName(request, name, additionalPortletIds); final IPortletWindowId portletWindowId = portletParameterParts.second; final String paramName = portletParameterParts.first; //Get the portlet parameter map to add the parameter to final Map<String, List<String>> portletParameters; if (portletWindowId == null) { if (targetedPortletRequestInfo == null) { this.logger.warn("Parameter " + name + " is for the targeted portlet but no portlet is targeted by the request. The parameter will be ignored. 
Value: " + values); parameterEntryItr.remove(); break; } portletParameters = targetedPortletRequestInfo.getPortletParameters(); } else { final PortletRequestInfoImpl portletRequestInfoImpl = portalRequestInfo .getPortletRequestInfo(portletWindowId); portletParameters = portletRequestInfoImpl.getPortletParameters(); } portletParameters.put(paramName, values); parameterEntryItr.remove(); continue; } // Portlet control parameters are either used directly or as a prefix to a windowId. Use the // SuffixedPortletParameter to simplify their parsing for (final SuffixedPortletParameter suffixedPortletParameter : SuffixedPortletParameter.values()) { final String parameterPrefix = suffixedPortletParameter.getParameterPrefix(); //Skip to the next parameter prefix if the current doesn't match if (!name.startsWith(parameterPrefix)) { continue; } //All of these parameters require at least one value if (values.isEmpty()) { this.logger.warn("Ignoring parameter {} as it must have a value. Value: {}", name, values); break; } //Verify the parameter is being used on the correct type of URL final Set<UrlType> validUrlTypes = suffixedPortletParameter.getValidUrlTypes(); if (!validUrlTypes.contains(portalRequestInfo.getUrlType())) { this.logger.warn( "Ignoring parameter {} as it is only valid for {} requests and this is a " + "{} request. Value: {}", name, validUrlTypes, portalRequestInfo.getUrlType(), values); break; } //Determine the portlet window and request info the parameter targets final IPortletWindowId portletWindowId = this.parsePortletWindowIdSuffix(request, parameterPrefix, additionalPortletIds, name); final PortletRequestInfoImpl portletRequestInfo = getTargetedPortletRequestInfo( portalRequestInfo, targetedPortletRequestInfo, portletWindowId); if (portletRequestInfo == null) { this.logger.warn( "Parameter {} is for the targeted portlet but no portlet is targeted" + " by the request. The parameter will be ignored. Value: {}", name, values); break; } parameterEntryItr.remove(); //Use the enum helper to store the parameter values on the request info suffixedPortletParameter.updateRequestInfo(request, portletWindowRegistry, portletRequestInfo, values, delegateIdMappings); break; } } //Any non-namespaced parameters still need processing? if (!parameterMap.isEmpty()) { //If the parameter was not ignored by a previous parser add it to whatever was targeted (portlet or portal) final Map<String, List<String>> parameters; if (!delegateIdMappings.isEmpty()) { //Resolve the last portlet window in the chain of delegation PortletRequestInfoImpl delegatePortletRequestInfo = null; for (final IPortletWindowId delegatePortletWindowId : delegateIdMappings.values()) { if (!delegateIdMappings.containsKey(delegatePortletWindowId)) { delegatePortletRequestInfo = portalRequestInfo .getPortletRequestInfo(delegatePortletWindowId); break; } } if (delegatePortletRequestInfo != null) { parameters = delegatePortletRequestInfo.getPortletParameters(); } else { this.logger.warn("No root delegate portlet could be resolved, non-namespaced parameters" + " will be sent to the targeted portlet. THIS SHOULD NEVER HAPPEN. 
Delegate" + " parent/child mapping: {}", delegateIdMappings); if (targetedPortletRequestInfo != null) { parameters = targetedPortletRequestInfo.getPortletParameters(); } else { parameters = portalRequestInfo.getPortalParameters(); } } } else if (targetedPortletRequestInfo != null) { parameters = targetedPortletRequestInfo.getPortletParameters(); } else { parameters = portalRequestInfo.getPortalParameters(); } ParameterMap.putAllList(parameters, parameterMap); } //If a portlet is targeted but no layout node is targeted must be maximized if (targetedPortletRequestInfo != null && portalRequestInfo.getTargetedLayoutNodeId() == null && (requestedUrlState == null || requestedUrlState == UrlState.NORMAL)) { portalRequestInfo.setUrlState(UrlState.MAX); targetedPortletRequestInfo.setWindowState(WindowState.MAXIMIZED); } //Make the request info object read-only, once parsed the request info should be static portalRequestInfo.makeReadOnly(); request.setAttribute(PORTAL_REQUEST_INFO_ATTR, portalRequestInfo); logger.debug("Finished building requestInfo: {}", portalRequestInfo); return portalRequestInfo; } finally { request.removeAttribute(PORTAL_REQUEST_PARSING_IN_PROGRESS_ATTR); } }
From source file:org.apache.james.mailbox.maildir.MaildirFolder.java
/**
 * Creates a map of recent messages.
 *
 * @param session
 * @return A {@link Map} with all uids and associated {@link MaildirMessageName}s of recent messages
 * @throws MailboxException If there is a problem with the uid list file
 */
public SortedMap<Long, MaildirMessageName> getRecentMessages(final MailboxSession session)
        throws MailboxException {
    final String[] recentFiles = getNewFolder().list();
    final LinkedList<String> lines = new LinkedList<String>();
    final int theLimit = recentFiles.length;
    return locker.executeWithLock(session, path,
            new LockAwareExecution<SortedMap<Long, MaildirMessageName>>() {
                @Override
                public SortedMap<Long, MaildirMessageName> execute() throws MailboxException {
                    final SortedMap<Long, MaildirMessageName> recentMessages = new TreeMap<Long, MaildirMessageName>();
                    File uidList = uidFile;
                    try {
                        if (!uidList.isFile()) {
                            if (!uidList.createNewFile())
                                throw new IOException("Could not create file " + uidList);
                            String[] curFiles = curFolder.list();
                            String[] newFiles = newFolder.list();
                            messageCount = curFiles.length + newFiles.length;
                            String[] allFiles = (String[]) ArrayUtils.addAll(curFiles, newFiles);
                            for (String file : allFiles)
                                lines.add(String.valueOf(getNextUid()) + " " + file);
                            PrintWriter pw = new PrintWriter(uidList);
                            try {
                                pw.println(createUidListHeader());
                                for (String line : lines)
                                    pw.println(line);
                            } finally {
                                IOUtils.closeQuietly(pw);
                            }
                        } else {
                            FileReader fileReader = null;
                            BufferedReader reader = null;
                            try {
                                fileReader = new FileReader(uidList);
                                reader = new BufferedReader(fileReader);
                                // the first line in the file contains the next uid and message count
                                String line = reader.readLine();
                                while ((line = reader.readLine()) != null)
                                    lines.add(line);
                            } finally {
                                IOUtils.closeQuietly(reader);
                                IOUtils.closeQuietly(fileReader);
                            }
                        }
                        int counter = 0;
                        String line;
                        while (counter < theLimit) {
                            // walk backwards as recent files are supposedly recent
                            try {
                                line = lines.removeLast();
                            } catch (NoSuchElementException e) {
                                break; // the list is empty
                            }
                            if (!line.equals("")) {
                                int gap = line.indexOf(" ");
                                if (gap == -1) {
                                    // there must be some issues in the file if no gap can be found
                                    session.getLog()
                                            .info("Corrupted entry in uid-file " + uidList + " line " + lines.size());
                                    continue;
                                }
                                Long uid = Long.valueOf(line.substring(0, gap));
                                String name = line.substring(gap + 1, line.length());
                                for (String recentFile : recentFiles) {
                                    if (recentFile.equals(name)) {
                                        recentMessages.put(uid, newMaildirMessageName(MaildirFolder.this, recentFile));
                                        counter++;
                                        break;
                                    }
                                }
                            }
                        }
                    } catch (IOException e) {
                        throw new MailboxException("Unable to read recent messages", e);
                    }
                    return recentMessages;
                }
            }, true);
}
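The Maildir example above drains its uid list from the tail and treats NoSuchElementException as the normal end-of-list signal. A distilled sketch of that pattern, with hypothetical list contents:

import java.util.LinkedList;
import java.util.NoSuchElementException;

public class DrainFromTail {
    public static void main(String[] args) {
        // hypothetical uid-list entries; newer entries are appended last
        LinkedList<String> lines = new LinkedList<String>();
        lines.add("1 msg-a");
        lines.add("2 msg-b");
        lines.add("3 msg-c");

        // walk backwards, as the Maildir example does, until the list is empty
        while (true) {
            String line;
            try {
                line = lines.removeLast();
            } catch (NoSuchElementException e) {
                break; // the list is empty
            }
            System.out.println("processing " + line);
        }
    }
}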
From source file:org.apache.sling.resourceresolver.impl.ResourceResolverImpl.java
/** * full implementation - apply sling:alias from the resource path - apply * /etc/map mappings (inkl. config backwards compat) - return absolute uri * if possible/*from w w w . j a va2 s.co m*/ * * @see org.apache.sling.api.resource.ResourceResolver#map(javax.servlet.http.HttpServletRequest, * java.lang.String) */ @Override public String map(final HttpServletRequest request, final String resourcePath) { checkClosed(); // find a fragment or query int fragmentQueryMark = resourcePath.indexOf('#'); if (fragmentQueryMark < 0) { fragmentQueryMark = resourcePath.indexOf('?'); } // cut fragment or query off the resource path String mappedPath; final String fragmentQuery; if (fragmentQueryMark >= 0) { fragmentQuery = resourcePath.substring(fragmentQueryMark); mappedPath = resourcePath.substring(0, fragmentQueryMark); logger.debug("map: Splitting resource path '{}' into '{}' and '{}'", new Object[] { resourcePath, mappedPath, fragmentQuery }); } else { fragmentQuery = null; mappedPath = resourcePath; } // cut off scheme and host, if the same as requested final String schemehostport; final String schemePrefix; if (request != null) { schemehostport = MapEntry.getURI(request.getScheme(), request.getServerName(), request.getServerPort(), "/"); schemePrefix = request.getScheme().concat("://"); logger.debug("map: Mapping path {} for {} (at least with scheme prefix {})", new Object[] { resourcePath, schemehostport, schemePrefix }); } else { schemehostport = null; schemePrefix = null; logger.debug("map: Mapping path {} for default", resourcePath); } ParsedParameters parsed = new ParsedParameters(mappedPath); final Resource res = resolveInternal(parsed.getRawPath(), parsed.getParameters()); if (res != null) { // keep, what we might have cut off in internal resolution final String resolutionPathInfo = res.getResourceMetadata().getResolutionPathInfo(); logger.debug("map: Path maps to resource {} with path info {}", res, resolutionPathInfo); // find aliases for segments. 
we can't walk the parent chain // since the request session might not have permissions to // read all parents SLING-2093 final LinkedList<String> names = new LinkedList<String>(); Resource current = res; String path = res.getPath(); while (path != null) { String alias = null; if (current != null && !path.endsWith(JCR_CONTENT_LEAF)) { if (factory.getMapEntries().isOptimizeAliasResolutionEnabled()) { logger.debug("map: Optimize Alias Resolution is Enabled"); String parentPath = ResourceUtil.getParent(path); if (parentPath != null) { final Map<String, String> aliases = factory.getMapEntries().getAliasMap(parentPath); if (aliases != null && aliases.containsValue(current.getName())) { for (String key : aliases.keySet()) { if (current.getName().equals(aliases.get(key))) { alias = key; break; } } } } } else { logger.debug("map: Optimize Alias Resolution is Disabled"); alias = ResourceResolverControl.getProperty(current, PROP_ALIAS); } } if (alias == null || alias.length() == 0) { alias = ResourceUtil.getName(path); } names.add(alias); path = ResourceUtil.getParent(path); if ("/".equals(path)) { path = null; } else if (path != null) { current = res.getResourceResolver().resolve(path); } } // build path from segment names final StringBuilder buf = new StringBuilder(); // construct the path from the segments (or root if none) if (names.isEmpty()) { buf.append('/'); } else { while (!names.isEmpty()) { buf.append('/'); buf.append(names.removeLast()); } } // reappend the resolutionPathInfo if (resolutionPathInfo != null) { buf.append(resolutionPathInfo); } // and then we have the mapped path to work on mappedPath = buf.toString(); logger.debug("map: Alias mapping resolves to path {}", mappedPath); } boolean mappedPathIsUrl = false; for (final MapEntry mapEntry : this.factory.getMapEntries().getMapMaps()) { final String[] mappedPaths = mapEntry.replace(mappedPath); if (mappedPaths != null) { logger.debug("map: Match for Entry {}", mapEntry); mappedPathIsUrl = !mapEntry.isInternal(); if (mappedPathIsUrl && schemehostport != null) { mappedPath = null; for (final String candidate : mappedPaths) { if (candidate.startsWith(schemehostport)) { mappedPath = candidate.substring(schemehostport.length() - 1); mappedPathIsUrl = false; logger.debug("map: Found host specific mapping {} resolving to {}", candidate, mappedPath); break; } else if (candidate.startsWith(schemePrefix) && mappedPath == null) { mappedPath = candidate; } } if (mappedPath == null) { mappedPath = mappedPaths[0]; } } else { // we can only go with assumptions selecting the first entry mappedPath = mappedPaths[0]; } logger.debug("resolve: MapEntry {} matches, mapped path is {}", mapEntry, mappedPath); break; } } // this should not be the case, since mappedPath is primed if (mappedPath == null) { mappedPath = resourcePath; } // [scheme:][//authority][path][?query][#fragment] try { // use commons-httpclient's URI instead of java.net.URI, as it can // actually accept *unescaped* URIs, such as the "mappedPath" and // return them in proper escaped form, including the path, via // toString() final URI uri = new URI(mappedPath, false); // 1. mangle the namespaces in the path String path = mangleNamespaces(uri.getPath()); // 2. 
prepend servlet context path if we have a request if (request != null && request.getContextPath() != null && request.getContextPath().length() > 0) { path = request.getContextPath().concat(path); } // update the path part of the URI uri.setPath(path); mappedPath = uri.toString(); } catch (final URIException e) { logger.warn("map: Unable to mangle namespaces for " + mappedPath + " returning unmangled", e); } logger.debug("map: Returning URL {} as mapping for path {}", mappedPath, resourcePath); // reappend fragment and/or query if (fragmentQuery != null) { mappedPath = mappedPath.concat(fragmentQuery); } return mappedPath; }
From source file:org.springframework.beans.factory.support.ConstructorResolver.java
/** * "autowire constructor" (with constructor arguments by type) behavior. * Also applied if explicit constructor argument values are specified, * matching all remaining arguments with beans from the bean factory. * <p>This corresponds to constructor injection: In this mode, a Spring * bean factory is able to host components that expect constructor-based * dependency resolution./*from w w w .j a v a2 s . c o m*/ * @param beanName the name of the bean * @param mbd the merged bean definition for the bean * @param chosenCtors chosen candidate constructors (or {@code null} if none) * @param explicitArgs argument values passed in programmatically via the getBean method, * or {@code null} if none (-> use constructor argument values from bean definition) * @return a BeanWrapper for the new instance */ public BeanWrapper autowireConstructor(final String beanName, final RootBeanDefinition mbd, @Nullable Constructor<?>[] chosenCtors, @Nullable final Object[] explicitArgs) { BeanWrapperImpl bw = new BeanWrapperImpl(); this.beanFactory.initBeanWrapper(bw); Constructor<?> constructorToUse = null; ArgumentsHolder argsHolderToUse = null; Object[] argsToUse = null; if (explicitArgs != null) { argsToUse = explicitArgs; } else { Object[] argsToResolve = null; synchronized (mbd.constructorArgumentLock) { constructorToUse = (Constructor<?>) mbd.resolvedConstructorOrFactoryMethod; if (constructorToUse != null && mbd.constructorArgumentsResolved) { // Found a cached constructor... argsToUse = mbd.resolvedConstructorArguments; if (argsToUse == null) { argsToResolve = mbd.preparedConstructorArguments; } } } if (argsToResolve != null) { argsToUse = resolvePreparedArguments(beanName, mbd, bw, constructorToUse, argsToResolve, true); } } if (constructorToUse == null) { // Need to resolve the constructor. boolean autowiring = (chosenCtors != null || mbd.getResolvedAutowireMode() == RootBeanDefinition.AUTOWIRE_CONSTRUCTOR); ConstructorArgumentValues resolvedValues = null; int minNrOfArgs; if (explicitArgs != null) { minNrOfArgs = explicitArgs.length; } else { ConstructorArgumentValues cargs = mbd.getConstructorArgumentValues(); resolvedValues = new ConstructorArgumentValues(); minNrOfArgs = resolveConstructorArguments(beanName, mbd, bw, cargs, resolvedValues); } // Take specified constructors, if any. Constructor<?>[] candidates = chosenCtors; if (candidates == null) { Class<?> beanClass = mbd.getBeanClass(); try { candidates = (mbd.isNonPublicAccessAllowed() ? beanClass.getDeclaredConstructors() : beanClass.getConstructors()); } catch (Throwable ex) { throw new BeanCreationException(mbd.getResourceDescription(), beanName, "Resolution of declared constructors on bean Class [" + beanClass.getName() + "] from ClassLoader [" + beanClass.getClassLoader() + "] failed", ex); } } AutowireUtils.sortConstructors(candidates); int minTypeDiffWeight = Integer.MAX_VALUE; Set<Constructor<?>> ambiguousConstructors = null; LinkedList<UnsatisfiedDependencyException> causes = null; for (Constructor<?> candidate : candidates) { Class<?>[] paramTypes = candidate.getParameterTypes(); if (constructorToUse != null && argsToUse.length > paramTypes.length) { // Already found greedy constructor that can be satisfied -> // do not look any further, there are only less greedy constructors left. 
break; } if (paramTypes.length < minNrOfArgs) { continue; } ArgumentsHolder argsHolder; if (resolvedValues != null) { try { String[] paramNames = ConstructorPropertiesChecker.evaluate(candidate, paramTypes.length); if (paramNames == null) { ParameterNameDiscoverer pnd = this.beanFactory.getParameterNameDiscoverer(); if (pnd != null) { paramNames = pnd.getParameterNames(candidate); } } argsHolder = createArgumentArray(beanName, mbd, resolvedValues, bw, paramTypes, paramNames, getUserDeclaredConstructor(candidate), autowiring, candidates.length == 1); } catch (UnsatisfiedDependencyException ex) { if (logger.isTraceEnabled()) { logger.trace( "Ignoring constructor [" + candidate + "] of bean '" + beanName + "': " + ex); } // Swallow and try next constructor. if (causes == null) { causes = new LinkedList<>(); } causes.add(ex); continue; } } else { // Explicit arguments given -> arguments length must match exactly. if (paramTypes.length != explicitArgs.length) { continue; } argsHolder = new ArgumentsHolder(explicitArgs); } int typeDiffWeight = (mbd.isLenientConstructorResolution() ? argsHolder.getTypeDifferenceWeight(paramTypes) : argsHolder.getAssignabilityWeight(paramTypes)); // Choose this constructor if it represents the closest match. if (typeDiffWeight < minTypeDiffWeight) { constructorToUse = candidate; argsHolderToUse = argsHolder; argsToUse = argsHolder.arguments; minTypeDiffWeight = typeDiffWeight; ambiguousConstructors = null; } else if (constructorToUse != null && typeDiffWeight == minTypeDiffWeight) { if (ambiguousConstructors == null) { ambiguousConstructors = new LinkedHashSet<>(); ambiguousConstructors.add(constructorToUse); } ambiguousConstructors.add(candidate); } } if (constructorToUse == null) { if (causes != null) { UnsatisfiedDependencyException ex = causes.removeLast(); for (Exception cause : causes) { this.beanFactory.onSuppressedException(cause); } throw ex; } throw new BeanCreationException(mbd.getResourceDescription(), beanName, "Could not resolve matching constructor " + "(hint: specify index/type/name arguments for simple parameters to avoid type ambiguities)"); } else if (ambiguousConstructors != null && !mbd.isLenientConstructorResolution()) { throw new BeanCreationException(mbd.getResourceDescription(), beanName, "Ambiguous constructor matches found in bean '" + beanName + "' " + "(hint: specify index/type/name arguments for simple parameters to avoid type ambiguities): " + ambiguousConstructors); } if (explicitArgs == null) { argsHolderToUse.storeCache(mbd, constructorToUse); } } try { final InstantiationStrategy strategy = this.beanFactory.getInstantiationStrategy(); Object beanInstance; if (System.getSecurityManager() != null) { final Constructor<?> ctorToUse = constructorToUse; final Object[] argumentsToUse = argsToUse; beanInstance = AccessController .doPrivileged( (PrivilegedAction<Object>) () -> strategy.instantiate(mbd, beanName, this.beanFactory, ctorToUse, argumentsToUse), this.beanFactory.getAccessControlContext()); } else { beanInstance = strategy.instantiate(mbd, beanName, this.beanFactory, constructorToUse, argsToUse); } bw.setBeanInstance(beanInstance); return bw; } catch (Throwable ex) { throw new BeanCreationException(mbd.getResourceDescription(), beanName, "Bean instantiation via constructor failed", ex); } }
From source file:org.springframework.beans.factory.support.ConstructorResolver.java
/** * Instantiate the bean using a named factory method. The method may be static, if the * bean definition parameter specifies a class, rather than a "factory-bean", or * an instance variable on a factory object itself configured using Dependency Injection. * <p>Implementation requires iterating over the static or instance methods with the * name specified in the RootBeanDefinition (the method may be overloaded) and trying * to match with the parameters. We don't have the types attached to constructor args, * so trial and error is the only way to go here. The explicitArgs array may contain * argument values passed in programmatically via the corresponding getBean method. * @param beanName the name of the bean// w w w.j a v a 2 s. com * @param mbd the merged bean definition for the bean * @param explicitArgs argument values passed in programmatically via the getBean * method, or {@code null} if none (-> use constructor argument values from bean definition) * @return a BeanWrapper for the new instance */ public BeanWrapper instantiateUsingFactoryMethod(final String beanName, final RootBeanDefinition mbd, @Nullable final Object[] explicitArgs) { BeanWrapperImpl bw = new BeanWrapperImpl(); this.beanFactory.initBeanWrapper(bw); Object factoryBean; Class<?> factoryClass; boolean isStatic; String factoryBeanName = mbd.getFactoryBeanName(); if (factoryBeanName != null) { if (factoryBeanName.equals(beanName)) { throw new BeanDefinitionStoreException(mbd.getResourceDescription(), beanName, "factory-bean reference points back to the same bean definition"); } factoryBean = this.beanFactory.getBean(factoryBeanName); if (mbd.isSingleton() && this.beanFactory.containsSingleton(beanName)) { throw new ImplicitlyAppearedSingletonException(); } factoryClass = factoryBean.getClass(); isStatic = false; } else { // It's a static factory method on the bean class. if (!mbd.hasBeanClass()) { throw new BeanDefinitionStoreException(mbd.getResourceDescription(), beanName, "bean definition declares neither a bean class nor a factory-bean reference"); } factoryBean = null; factoryClass = mbd.getBeanClass(); isStatic = true; } Method factoryMethodToUse = null; ArgumentsHolder argsHolderToUse = null; Object[] argsToUse = null; if (explicitArgs != null) { argsToUse = explicitArgs; } else { Object[] argsToResolve = null; synchronized (mbd.constructorArgumentLock) { factoryMethodToUse = (Method) mbd.resolvedConstructorOrFactoryMethod; if (factoryMethodToUse != null && mbd.constructorArgumentsResolved) { // Found a cached factory method... argsToUse = mbd.resolvedConstructorArguments; if (argsToUse == null) { argsToResolve = mbd.preparedConstructorArguments; } } } if (argsToResolve != null) { argsToUse = resolvePreparedArguments(beanName, mbd, bw, factoryMethodToUse, argsToResolve, true); } } if (factoryMethodToUse == null || argsToUse == null) { // Need to determine the factory method... // Try all methods with this name to see if they match the given arguments. 
factoryClass = ClassUtils.getUserClass(factoryClass); Method[] rawCandidates = getCandidateMethods(factoryClass, mbd); List<Method> candidateSet = new ArrayList<>(); for (Method candidate : rawCandidates) { if (Modifier.isStatic(candidate.getModifiers()) == isStatic && mbd.isFactoryMethod(candidate)) { candidateSet.add(candidate); } } Method[] candidates = candidateSet.toArray(new Method[0]); AutowireUtils.sortFactoryMethods(candidates); ConstructorArgumentValues resolvedValues = null; boolean autowiring = (mbd.getResolvedAutowireMode() == RootBeanDefinition.AUTOWIRE_CONSTRUCTOR); int minTypeDiffWeight = Integer.MAX_VALUE; Set<Method> ambiguousFactoryMethods = null; int minNrOfArgs; if (explicitArgs != null) { minNrOfArgs = explicitArgs.length; } else { // We don't have arguments passed in programmatically, so we need to resolve the // arguments specified in the constructor arguments held in the bean definition. if (mbd.hasConstructorArgumentValues()) { ConstructorArgumentValues cargs = mbd.getConstructorArgumentValues(); resolvedValues = new ConstructorArgumentValues(); minNrOfArgs = resolveConstructorArguments(beanName, mbd, bw, cargs, resolvedValues); } else { minNrOfArgs = 0; } } LinkedList<UnsatisfiedDependencyException> causes = null; for (Method candidate : candidates) { Class<?>[] paramTypes = candidate.getParameterTypes(); if (paramTypes.length >= minNrOfArgs) { ArgumentsHolder argsHolder; if (explicitArgs != null) { // Explicit arguments given -> arguments length must match exactly. if (paramTypes.length != explicitArgs.length) { continue; } argsHolder = new ArgumentsHolder(explicitArgs); } else { // Resolved constructor arguments: type conversion and/or autowiring necessary. try { String[] paramNames = null; ParameterNameDiscoverer pnd = this.beanFactory.getParameterNameDiscoverer(); if (pnd != null) { paramNames = pnd.getParameterNames(candidate); } argsHolder = createArgumentArray(beanName, mbd, resolvedValues, bw, paramTypes, paramNames, candidate, autowiring, candidates.length == 1); } catch (UnsatisfiedDependencyException ex) { if (logger.isTraceEnabled()) { logger.trace("Ignoring factory method [" + candidate + "] of bean '" + beanName + "': " + ex); } // Swallow and try next overloaded factory method. if (causes == null) { causes = new LinkedList<>(); } causes.add(ex); continue; } } int typeDiffWeight = (mbd.isLenientConstructorResolution() ? argsHolder.getTypeDifferenceWeight(paramTypes) : argsHolder.getAssignabilityWeight(paramTypes)); // Choose this factory method if it represents the closest match. if (typeDiffWeight < minTypeDiffWeight) { factoryMethodToUse = candidate; argsHolderToUse = argsHolder; argsToUse = argsHolder.arguments; minTypeDiffWeight = typeDiffWeight; ambiguousFactoryMethods = null; } // Find out about ambiguity: In case of the same type difference weight // for methods with the same number of parameters, collect such candidates // and eventually raise an ambiguity exception. // However, only perform that check in non-lenient constructor resolution mode, // and explicitly ignore overridden methods (with the same parameter signature). 
else if (factoryMethodToUse != null && typeDiffWeight == minTypeDiffWeight && !mbd.isLenientConstructorResolution() && paramTypes.length == factoryMethodToUse.getParameterCount() && !Arrays.equals(paramTypes, factoryMethodToUse.getParameterTypes())) { if (ambiguousFactoryMethods == null) { ambiguousFactoryMethods = new LinkedHashSet<>(); ambiguousFactoryMethods.add(factoryMethodToUse); } ambiguousFactoryMethods.add(candidate); } } } if (factoryMethodToUse == null) { if (causes != null) { UnsatisfiedDependencyException ex = causes.removeLast(); for (Exception cause : causes) { this.beanFactory.onSuppressedException(cause); } throw ex; } List<String> argTypes = new ArrayList<>(minNrOfArgs); if (explicitArgs != null) { for (Object arg : explicitArgs) { argTypes.add(arg != null ? arg.getClass().getSimpleName() : "null"); } } else if (resolvedValues != null) { Set<ValueHolder> valueHolders = new LinkedHashSet<>(resolvedValues.getArgumentCount()); valueHolders.addAll(resolvedValues.getIndexedArgumentValues().values()); valueHolders.addAll(resolvedValues.getGenericArgumentValues()); for (ValueHolder value : valueHolders) { String argType = (value.getType() != null ? ClassUtils.getShortName(value.getType()) : (value.getValue() != null ? value.getValue().getClass().getSimpleName() : "null")); argTypes.add(argType); } } String argDesc = StringUtils.collectionToCommaDelimitedString(argTypes); throw new BeanCreationException(mbd.getResourceDescription(), beanName, "No matching factory method found: " + (mbd.getFactoryBeanName() != null ? "factory bean '" + mbd.getFactoryBeanName() + "'; " : "") + "factory method '" + mbd.getFactoryMethodName() + "(" + argDesc + ")'. " + "Check that a method with the specified name " + (minNrOfArgs > 0 ? "and arguments " : "") + "exists and that it is " + (isStatic ? "static" : "non-static") + "."); } else if (void.class == factoryMethodToUse.getReturnType()) { throw new BeanCreationException(mbd.getResourceDescription(), beanName, "Invalid factory method '" + mbd.getFactoryMethodName() + "': needs to have a non-void return type!"); } else if (ambiguousFactoryMethods != null) { throw new BeanCreationException(mbd.getResourceDescription(), beanName, "Ambiguous factory method matches found in bean '" + beanName + "' " + "(hint: specify index/type/name arguments for simple parameters to avoid type ambiguities): " + ambiguousFactoryMethods); } if (explicitArgs == null && argsHolderToUse != null) { argsHolderToUse.storeCache(mbd, factoryMethodToUse); } } try { Object beanInstance; if (System.getSecurityManager() != null) { final Object fb = factoryBean; final Method factoryMethod = factoryMethodToUse; final Object[] args = argsToUse; beanInstance = AccessController.doPrivileged( (PrivilegedAction<Object>) () -> this.beanFactory.getInstantiationStrategy() .instantiate(mbd, beanName, this.beanFactory, fb, factoryMethod, args), this.beanFactory.getAccessControlContext()); } else { beanInstance = this.beanFactory.getInstantiationStrategy().instantiate(mbd, beanName, this.beanFactory, factoryBean, factoryMethodToUse, argsToUse); } bw.setBeanInstance(beanInstance); return bw; } catch (Throwable ex) { throw new BeanCreationException(mbd.getResourceDescription(), beanName, "Bean instantiation via factory method failed", ex); } }
From source file:org.trnltk.experiment.morphology.ambiguity.DataDiffUtil.java
/**
 * Reorder and merge like edit sections. Merge equalities.
 * Any edit section can move as long as it doesn't cross an equality.
 *
 * @param diffs LinkedList of Diff objects.
 */
public void diff_cleanupMerge(LinkedList<Diff<T>> diffs) {
    diffs.add(new Diff<T>(Operation.EQUAL, new ArrayList<T>())); // Add a dummy entry at the end.
    ListIterator<Diff<T>> pointer = diffs.listIterator();
    int count_delete = 0;
    int count_insert = 0;
    List<T> text_delete = new ArrayList<T>();
    List<T> text_insert = new ArrayList<T>();
    Diff thisDiff = pointer.next();
    Diff prevEqual = null;
    int commonlength;
    while (thisDiff != null) {
        switch (thisDiff.operation) {
        case INSERT:
            count_insert++;
            text_insert = ListUtils.union(text_insert, thisDiff.text);
            prevEqual = null;
            break;
        case DELETE:
            count_delete++;
            text_delete = ListUtils.union(text_delete, thisDiff.text);
            prevEqual = null;
            break;
        case EQUAL:
            if (count_delete + count_insert > 1) {
                boolean both_types = count_delete != 0 && count_insert != 0;
                // Delete the offending records.
                pointer.previous(); // Reverse direction.
                while (count_delete-- > 0) {
                    pointer.previous();
                    pointer.remove();
                }
                while (count_insert-- > 0) {
                    pointer.previous();
                    pointer.remove();
                }
                if (both_types) {
                    // Factor out any common prefixies.
                    commonlength = diff_commonPrefix(text_insert, text_delete);
                    if (commonlength != 0) {
                        if (pointer.hasPrevious()) {
                            thisDiff = pointer.previous();
                            assert thisDiff.operation == Operation.EQUAL : "Previous diff should have been an equality.";
                            thisDiff.text = ListUtils.union(thisDiff.text, text_insert.subList(0, commonlength));
                            pointer.next();
                        } else {
                            pointer.add(new Diff(Operation.EQUAL, text_insert.subList(0, commonlength)));
                        }
                        text_insert = text_insert.subList(commonlength, text_insert.size());
                        text_delete = text_delete.subList(commonlength, text_delete.size());
                    }
                    // Factor out any common suffixies.
                    commonlength = diff_commonSuffix(text_insert, text_delete);
                    if (commonlength != 0) {
                        thisDiff = pointer.next();
                        thisDiff.text = ListUtils.union(
                                text_insert.subList(text_insert.size() - commonlength, text_insert.size()),
                                thisDiff.text);
                        text_insert = text_insert.subList(0, text_insert.size() - commonlength);
                        text_delete = text_delete.subList(0, text_delete.size() - commonlength);
                        pointer.previous();
                    }
                }
                // Insert the merged records.
                if (text_delete.size() != 0) {
                    pointer.add(new Diff(Operation.DELETE, text_delete));
                }
                if (text_insert.size() != 0) {
                    pointer.add(new Diff(Operation.INSERT, text_insert));
                }
                // Step forward to the equality.
                thisDiff = pointer.hasNext() ? pointer.next() : null;
            } else if (prevEqual != null) {
                // Merge this equality with the previous one.
                prevEqual.text = ListUtils.union(prevEqual.text, thisDiff.text);
                pointer.remove();
                thisDiff = pointer.previous();
                pointer.next(); // Forward direction
            }
            count_insert = 0;
            count_delete = 0;
            text_delete = new ArrayList<T>();
            text_insert = new ArrayList<T>();
            prevEqual = thisDiff;
            break;
        }
        thisDiff = pointer.hasNext() ? pointer.next() : null;
    }
    if (diffs.getLast().text.size() == 0) {
        diffs.removeLast(); // Remove the dummy entry at the end.
    }

    /*
     * Second pass: look for single edits surrounded on both sides by equalities
     * which can be shifted sideways to eliminate an equality.
     * e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
     */
    boolean changes = false;
    // Create a new iterator at the start.
    // (As opposed to walking the current one back.)
    pointer = diffs.listIterator();
    Diff<T> prevDiff = pointer.hasNext() ? pointer.next() : null;
    thisDiff = pointer.hasNext() ? pointer.next() : null;
    Diff nextDiff = pointer.hasNext() ? pointer.next() : null;
    // Intentionally ignore the first and last element (don't need checking).
    while (nextDiff != null) {
        if (prevDiff.operation == Operation.EQUAL && nextDiff.operation == Operation.EQUAL) {
            // This is a single edit surrounded by equalities.
            if (endsWith(thisDiff.text, prevDiff.text)) {
                // Shift the edit over the previous equality.
                thisDiff.text = ListUtils.union(prevDiff.text,
                        thisDiff.text.subList(0, thisDiff.text.size() - prevDiff.text.size()));
                nextDiff.text = ListUtils.union(prevDiff.text, nextDiff.text);
                pointer.previous(); // Walk past nextDiff.
                pointer.previous(); // Walk past thisDiff.
                pointer.previous(); // Walk past prevDiff.
                pointer.remove(); // Delete prevDiff.
                pointer.next(); // Walk past thisDiff.
                thisDiff = pointer.next(); // Walk past nextDiff.
                nextDiff = pointer.hasNext() ? pointer.next() : null;
                changes = true;
            } else if (startsWith(thisDiff.text, nextDiff.text)) {
                // Shift the edit over the next equality.
                prevDiff.text = ListUtils.union(prevDiff.text, nextDiff.text);
                thisDiff.text = ListUtils.union(
                        thisDiff.text.subList(nextDiff.text.size(), thisDiff.text.size()), nextDiff.text);
                pointer.remove(); // Delete nextDiff.
                nextDiff = pointer.hasNext() ? pointer.next() : null;
                changes = true;
            }
        }
        prevDiff = thisDiff;
        thisDiff = nextDiff;
        nextDiff = pointer.hasNext() ? pointer.next() : null;
    }
    // If shifts were made, the diff needs reordering and another shift sweep.
    if (changes) {
        diff_cleanupMerge(diffs);
    }
}
From source file:com.edgenius.wiki.render.filter.MacroFilter.java
/** *//* w w w . j av a 2 s. c o m*/ private void checkGroup(final int initPos, CharSequence input, final LinkedList<GroupProcessor> stack, List<GroupProcessor> processors) { final List<Region> pairRegions = new ArrayList<Region>(); singleMacroProvider.replaceByTokenVisitor(input, new TokenVisitor<Matcher>() { public void handleMatch(StringBuffer buffer, Matcher result) { String macroName = result.group(1); if (macroName != null && !macroName.startsWith("$")) { Macro macro = macroMgr.getMacro(macroName); if (macro != null) { //IMPORTANT: here does not check Macro.isPair() and also put it into pairRegions for following process //it is the sequence of process must keep consistant with physical sequence in input text, //for example, {table}{cell}...{rowdiv}, rowdiv is single and must be after cell int start = result.start(0); int end = result.end(0); Region pair = new Region(start, end); //no parameter, then mark as unknown, otherwise, must be a start macro if (StringUtils.isBlank(result.group(2))) { pair.setKey(MACRO_REGION_KEY_UNKNOWN); } else { pair.setKey(MACRO_REGION_KEY_START); } //just for temporary to remember the macro name... pair.setContent(macroName); //sum to list pairRegions.add(pair); } } } }); int size = pairRegions.size(); if (size > 0) { StringBuffer inputBuf = new StringBuffer(input); for (int idx = 0; idx < size; idx++) { Region reg = pairRegions.get(idx); Macro macro = macroMgr.getMacro(reg.getContent()); if (macro.isPaired()) { int deep = 0; Region pair = null; //looking for pairs... for (int chIdx = idx + 1; chIdx < size; chIdx++) { Region next = pairRegions.get(chIdx); if (StringUtils.equalsIgnoreCase(reg.getContent(), next.getContent())) { //start is unknown (no attribute), then end must be unknown if (MACRO_REGION_KEY_UNKNOWN.equals(reg.getKey()) && MACRO_REGION_KEY_UNKNOWN.equals(next.getKey())) { //matched pair = next; //skip all internal node - which is handle by embedded recursive idx = chIdx; break; } if (MACRO_REGION_KEY_START.equals(reg.getKey()) && MACRO_REGION_KEY_UNKNOWN.equals(next.getKey())) { if (deep == 0) { //matched; pair = next; //skip all internal node - which is handle by embedded recursive idx = chIdx; break; } else { //just another inner same name macro matched, deep minus 1 deep--; } } if (MACRO_REGION_KEY_START.equals(next.getKey())) { //ok, it gets another start, in 4th scenarios - then add deep deep++; } } } //ok, success find paired if (pair != null) { int start = initPos + reg.getStart(); int end = initPos + pair.getEnd(); int contentStart = initPos + reg.getEnd(); int contentEnd = initPos + pair.getStart(); GroupProcessor currProcessor = stack.size() == 0 ? null : stack.getLast(); if (currProcessor != null) { currProcessor.adoptChild(macro, start, end); } if (macro.isProcessEmbedded() && (end > start)) { if (macro.hasChildren() != null) { stack.add(((GroupProcessorMacro) macro).newGroupProcessor(macro, start, end)); } checkGroup(contentStart, inputBuf.subSequence(contentStart - initPos, contentEnd - initPos), stack, processors); if (macro.hasChildren() != null) { //pop the current one, means it is a completed GroupProcessor processors.add(stack.removeLast()); } } } } else { //single macro - directly detect if it is child GroupProcessor currProcessor = stack.size() == 0 ? null : stack.getLast(); if (currProcessor != null) { currProcessor.adoptChild(macro, initPos + reg.getStart(), initPos + reg.getEnd()); } } } } }
From source file:de.innovationgate.wgpublisher.webtml.utils.TMLContext.java
public void removeThreadMainContext() {
    LinkedList<TMLContext> contexts = _threadMainContexts.get();

    if ("true".equals(System.getProperty("de.innovationgate.wga.threadmaincontexts.verbose"))) {
        try {
            StackTraceElement[] elements = Thread.currentThread().getStackTrace();
            getlog().info("Thread " + Thread.currentThread().hashCode() + " - Removing main context '"
                    + getpath() + "' (" + System.identityHashCode(this) + ") at position "
                    + (contexts.size() - 1) + ", Stacktrace " + elements[2]);
        } catch (WGAPIException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    if (contexts != null) {
        TMLContext lastContext = contexts.removeLast();
        if (lastContext != this) {
            try {
                getlog().error(
                        "Error in WebTML context management: Thread main context mismatch on removal. Removed: "
                                + this.getpath() + " (" + System.identityHashCode(this) + "), In stack: "
                                + lastContext.getpath() + " (" + System.identityHashCode(lastContext) + ")");
            } catch (WGAPIException e) {
            }
        }
    }
}
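removeThreadMainContext() pops the most recently registered main context from a per-thread LinkedList held in a ThreadLocal and checks that the popped element really is the context being removed. (Note that the verbose logging branch reads contexts.size() before the null check, so it assumes the thread-local stack has already been initialised.) Below is a small sketch of the same ThreadLocal-backed stack idea; the class and method names are invented stand-ins for TMLContext and _threadMainContexts.

import java.util.LinkedList;

public class ThreadContextStackSketch {
    // Hypothetical per-thread stack, mirroring _threadMainContexts in the example above.
    private static final ThreadLocal<LinkedList<String>> CONTEXTS =
            ThreadLocal.withInitial(LinkedList::new);

    static void pushContext(String ctx) {
        CONTEXTS.get().add(ctx);              // push onto the tail
    }

    static void popContext(String expected) {
        LinkedList<String> contexts = CONTEXTS.get();
        String last = contexts.removeLast();  // pop the most recently pushed context
        if (!last.equals(expected)) {
            // Same idea as the mismatch log in removeThreadMainContext().
            System.err.println("Context mismatch: expected " + expected + " but popped " + last);
        }
    }

    public static void main(String[] args) {
        pushContext("main");
        pushContext("include");
        popContext("include");
        popContext("main");
        System.out.println("remaining: " + CONTEXTS.get());
    }
}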
From source file:org.alfresco.solr.query.Solr4QueryParser.java
@SuppressWarnings("unchecked") protected Query getFieldQueryImpl(String field, String queryText, AnalysisMode analysisMode, LuceneFunction luceneFunction) throws ParseException, IOException { // make sure the field exists or return a dummy query so we have no error ....ACE-3231 SchemaField schemaField = schema.getFieldOrNull(field); boolean isNumeric = false; if (schemaField == null) { return new TermQuery(new Term("_dummy_", "_miss_")); } else {/* www.j a va 2 s . c o m*/ isNumeric = (schemaField.getType().getNumericType() != null); } // Use the analyzer to get all the tokens, and then build a TermQuery, // PhraseQuery, or noth // TODO: Untokenised columns with functions require special handling if (luceneFunction != LuceneFunction.FIELD) { throw new UnsupportedOperationException( "Field queries are not supported on lucene functions (UPPER, LOWER, etc)"); } // if the incoming string already has a language identifier we strip it iff and addit back on again String localePrefix = ""; String toTokenise = queryText; if (queryText.startsWith("{")) { int position = queryText.indexOf("}"); if (position > 0) { String language = queryText.substring(0, position + 1); Locale locale = new Locale(queryText.substring(1, position)); String token = queryText.substring(position + 1); boolean found = false; for (Locale current : Locale.getAvailableLocales()) { if (current.toString().equalsIgnoreCase(locale.toString())) { found = true; break; } } if (found) { localePrefix = language; toTokenise = token; } else { //toTokenise = token; } } } String testText = toTokenise; boolean requiresMLTokenDuplication = false; String localeString = null; if (isPropertyField(field) && (localePrefix.length() == 0)) { if ((queryText.length() > 0) && (queryText.charAt(0) == '\u0000')) { int position = queryText.indexOf("\u0000", 1); testText = queryText.substring(position + 1); requiresMLTokenDuplication = true; localeString = queryText.substring(1, position); } } // find the positions of any escaped * and ? and ignore them Set<Integer> wildcardPoistions = getWildcardPositions(testText); TokenStream source = null; ArrayList<org.apache.lucene.analysis.Token> list = new ArrayList<org.apache.lucene.analysis.Token>(); boolean severalTokensAtSamePosition = false; org.apache.lucene.analysis.Token nextToken; int positionCount = 0; try { org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token(); source = getAnalyzer().tokenStream(field, new StringReader(toTokenise)); source.reset(); while (source.incrementToken()) { CharTermAttribute cta = source.getAttribute(CharTermAttribute.class); OffsetAttribute offsetAtt = source.getAttribute(OffsetAttribute.class); TypeAttribute typeAtt = null; if (source.hasAttribute(TypeAttribute.class)) { typeAtt = source.getAttribute(TypeAttribute.class); } PositionIncrementAttribute posIncAtt = null; if (source.hasAttribute(PositionIncrementAttribute.class)) { posIncAtt = source.getAttribute(PositionIncrementAttribute.class); } nextToken = new Token(cta.buffer(), 0, cta.length(), offsetAtt.startOffset(), offsetAtt.endOffset()); if (typeAtt != null) { nextToken.setType(typeAtt.type()); } if (posIncAtt != null) { nextToken.setPositionIncrement(posIncAtt.getPositionIncrement()); } list.add(nextToken); if (nextToken.getPositionIncrement() != 0) positionCount += nextToken.getPositionIncrement(); else severalTokensAtSamePosition = true; } } catch (SolrException e) { // MNT-15336 // Text against a numeric field should fail silently rather then tell you it is not possible. 
if (isNumeric && e.getMessage() != null && e.getMessage().startsWith("Invalid Number:")) { // Generate a query that does not match any document - rather than nothing return createNoMatchQuery(); } else { throw e; } } finally { try { if (source != null) { source.close(); } } catch (IOException e) { // ignore } } // add any alpha numeric wildcards that have been missed // Fixes most stop word and wild card issues for (int index = 0; index < testText.length(); index++) { char current = testText.charAt(index); if (((current == '*') || (current == '?')) && wildcardPoistions.contains(index)) { StringBuilder pre = new StringBuilder(10); if (index == 0) { // "*" and "?" at the start boolean found = false; for (int j = 0; j < list.size(); j++) { org.apache.lucene.analysis.Token test = list.get(j); if ((test.startOffset() <= 0) && (0 < test.endOffset())) { found = true; break; } } if (!found && (list.size() == 0)) { // Add new token followed by * not given by the tokeniser org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token("", 0, 0); newToken.setType("ALPHANUM"); if (requiresMLTokenDuplication) { Locale locale = I18NUtil.parseLocale(localeString); MLTokenDuplicator duplicator = new MLTokenDuplicator(locale, MLAnalysisMode.EXACT_LANGUAGE); Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken); if (it != null) { int count = 0; while (it.hasNext()) { list.add(it.next()); count++; if (count > 1) { severalTokensAtSamePosition = true; } } } } // content else { list.add(newToken); } } } else if (index > 0) { // Add * and ? back into any tokens from which it has been removed boolean tokenFound = false; for (int j = 0; j < list.size(); j++) { org.apache.lucene.analysis.Token test = list.get(j); if ((test.startOffset() <= index) && (index < test.endOffset())) { if (requiresMLTokenDuplication) { String termText = test.toString(); int position = termText.indexOf("}"); String language = termText.substring(0, position + 1); String token = termText.substring(position + 1); if (index >= test.startOffset() + token.length()) { test.setEmpty(); test.append(language + token + current); } } else { if (index >= test.startOffset() + test.length()) { test.setEmpty(); test.append(test.toString() + current); } } tokenFound = true; break; } } if (!tokenFound) { for (int i = index - 1; i >= 0; i--) { char c = testText.charAt(i); if (Character.isLetterOrDigit(c)) { boolean found = false; for (int j = 0; j < list.size(); j++) { org.apache.lucene.analysis.Token test = list.get(j); if ((test.startOffset() <= i) && (i < test.endOffset())) { found = true; break; } } if (found) { break; } else { pre.insert(0, c); } } else { break; } } if (pre.length() > 0) { // Add new token followed by * not given by the tokeniser org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token( pre.toString(), index - pre.length(), index); newToken.setType("ALPHANUM"); if (requiresMLTokenDuplication) { Locale locale = I18NUtil.parseLocale(localeString); MLTokenDuplicator duplicator = new MLTokenDuplicator(locale, MLAnalysisMode.EXACT_LANGUAGE); Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken); if (it != null) { int count = 0; while (it.hasNext()) { list.add(it.next()); count++; if (count > 1) { severalTokensAtSamePosition = true; } } } } // content else { list.add(newToken); } } } } StringBuilder post = new StringBuilder(10); if (index > 0) { for (int i = index + 1; i < testText.length(); i++) { char c = testText.charAt(i); if 
(Character.isLetterOrDigit(c)) { boolean found = false; for (int j = 0; j < list.size(); j++) { org.apache.lucene.analysis.Token test = list.get(j); if ((test.startOffset() <= i) && (i < test.endOffset())) { found = true; break; } } if (found) { break; } else { post.append(c); } } else { break; } } if (post.length() > 0) { // Add new token followed by * not given by the tokeniser org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token( post.toString(), index + 1, index + 1 + post.length()); newToken.setType("ALPHANUM"); if (requiresMLTokenDuplication) { Locale locale = I18NUtil.parseLocale(localeString); MLTokenDuplicator duplicator = new MLTokenDuplicator(locale, MLAnalysisMode.EXACT_LANGUAGE); Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken); if (it != null) { int count = 0; while (it.hasNext()) { list.add(it.next()); count++; if (count > 1) { severalTokensAtSamePosition = true; } } } } // content else { list.add(newToken); } } } } } // Put in real position increments as we treat them correctly int curentIncrement = -1; for (org.apache.lucene.analysis.Token c : list) { if (curentIncrement == -1) { curentIncrement = c.getPositionIncrement(); } else if (c.getPositionIncrement() > 0) { curentIncrement = c.getPositionIncrement(); } else { c.setPositionIncrement(curentIncrement); } } // Remove small bits already covered in larger fragments list = getNonContained(list); Collections.sort(list, new Comparator<org.apache.lucene.analysis.Token>() { public int compare(Token o1, Token o2) { int dif = o1.startOffset() - o2.startOffset(); return dif; } }); // Combined * and ? based strings - should redo the tokeniser // Build tokens by position LinkedList<LinkedList<org.apache.lucene.analysis.Token>> tokensByPosition = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>(); LinkedList<org.apache.lucene.analysis.Token> currentList = null; int lastStart = 0; for (org.apache.lucene.analysis.Token c : list) { if (c.startOffset() == lastStart) { if (currentList == null) { currentList = new LinkedList<org.apache.lucene.analysis.Token>(); tokensByPosition.add(currentList); } currentList.add(c); } else { currentList = new LinkedList<org.apache.lucene.analysis.Token>(); tokensByPosition.add(currentList); currentList.add(c); } lastStart = c.startOffset(); } // Build all the token sequences and see which ones get strung together OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>> allTokenSequencesSet = new OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>>(); for (LinkedList<org.apache.lucene.analysis.Token> tokensAtPosition : tokensByPosition) { OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>> positionalSynonymSequencesSet = new OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>>(); OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>> newAllTokenSequencesSet = new OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>>(); FOR_FIRST_TOKEN_AT_POSITION_ONLY: for (org.apache.lucene.analysis.Token t : tokensAtPosition) { org.apache.lucene.analysis.Token replace = new org.apache.lucene.analysis.Token(t, t.startOffset(), t.endOffset()); replace.setType(t.type()); replace.setPositionIncrement(t.getPositionIncrement()); boolean tokenFoundSequence = false; for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : allTokenSequencesSet) { LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>(); newEntry.addAll(tokenSequence); if 
((newEntry.getLast().endOffset() == replace.endOffset()) && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) { if ((newEntry.getLast().startOffset() == replace.startOffset()) && newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)) { positionalSynonymSequencesSet.add(tokenSequence); newEntry.add(replace); tokenFoundSequence = true; } else if (newEntry.getLast().type().equals(CommonGramsFilter.GRAM_TYPE)) { if (newEntry.toString().endsWith(replace.toString())) { // already in the gram positionalSynonymSequencesSet.add(tokenSequence); tokenFoundSequence = true; } else { // need to replace the synonym in the current gram tokenFoundSequence = true; StringBuffer old = new StringBuffer(newEntry.getLast().toString()); old.replace(replace.startOffset() - newEntry.getLast().startOffset(), replace.endOffset() - newEntry.getLast().startOffset(), replace.toString()); Token newToken = new org.apache.lucene.analysis.Token(old.toString(), newEntry.getLast().startOffset(), newEntry.getLast().endOffset()); newEntry.removeLast(); newEntry.add(newToken); } } } else if ((newEntry.getLast().startOffset() < replace.startOffset()) && (newEntry.getLast().endOffset() < replace.endOffset())) { if (newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM) && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) { positionalSynonymSequencesSet.add(tokenSequence); } newEntry.add(replace); tokenFoundSequence = true; } newAllTokenSequencesSet.add(newEntry); } if (false == tokenFoundSequence) { for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : newAllTokenSequencesSet) { LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>(); newEntry.addAll(tokenSequence); if ((newEntry.getLast().endOffset() == replace.endOffset()) && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) { if ((newEntry.getLast().startOffset() == replace.startOffset()) && newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)) { positionalSynonymSequencesSet.add(tokenSequence); newEntry.add(replace); tokenFoundSequence = true; } else if (newEntry.getLast().type().equals(CommonGramsFilter.GRAM_TYPE)) { if (newEntry.toString().endsWith(replace.toString())) { // already in the gram positionalSynonymSequencesSet.add(tokenSequence); tokenFoundSequence = true; } else { // need to replace the synonym in the current gram tokenFoundSequence = true; StringBuffer old = new StringBuffer(newEntry.getLast().toString()); old.replace(replace.startOffset() - newEntry.getLast().startOffset(), replace.endOffset() - newEntry.getLast().startOffset(), replace.toString()); Token newToken = new org.apache.lucene.analysis.Token(old.toString(), newEntry.getLast().startOffset(), newEntry.getLast().endOffset()); newEntry.removeLast(); newEntry.add(newToken); positionalSynonymSequencesSet.add(newEntry); } } } else if ((newEntry.getLast().startOffset() < replace.startOffset()) && (newEntry.getLast().endOffset() < replace.endOffset())) { if (newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM) && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) { positionalSynonymSequencesSet.add(tokenSequence); newEntry.add(replace); tokenFoundSequence = true; } } } } if (false == tokenFoundSequence) { LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>(); newEntry.add(replace); newAllTokenSequencesSet.add(newEntry); } // Limit the max number of permutations we consider if (newAllTokenSequencesSet.size() > 64) { break FOR_FIRST_TOKEN_AT_POSITION_ONLY; } } 
allTokenSequencesSet = newAllTokenSequencesSet; allTokenSequencesSet.addAll(positionalSynonymSequencesSet); } LinkedList<LinkedList<org.apache.lucene.analysis.Token>> allTokenSequences = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>( allTokenSequencesSet); // build the unique LinkedList<LinkedList<org.apache.lucene.analysis.Token>> fixedTokenSequences = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>(); for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : allTokenSequences) { LinkedList<org.apache.lucene.analysis.Token> fixedTokenSequence = new LinkedList<org.apache.lucene.analysis.Token>(); fixedTokenSequences.add(fixedTokenSequence); org.apache.lucene.analysis.Token replace = null; for (org.apache.lucene.analysis.Token c : tokenSequence) { if (replace == null) { StringBuilder prefix = new StringBuilder(); for (int i = c.startOffset() - 1; i >= 0; i--) { char test = testText.charAt(i); if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) { prefix.insert(0, test); } else { break; } } String pre = prefix.toString(); if (requiresMLTokenDuplication) { String termText = c.toString(); int position = termText.indexOf("}"); String language = termText.substring(0, position + 1); String token = termText.substring(position + 1); replace = new org.apache.lucene.analysis.Token(language + pre + token, c.startOffset() - pre.length(), c.endOffset()); replace.setType(c.type()); replace.setPositionIncrement(c.getPositionIncrement()); } else { String termText = c.toString(); replace = new org.apache.lucene.analysis.Token(pre + termText, c.startOffset() - pre.length(), c.endOffset()); replace.setType(c.type()); replace.setPositionIncrement(c.getPositionIncrement()); } } else { StringBuilder prefix = new StringBuilder(); StringBuilder postfix = new StringBuilder(); StringBuilder builder = prefix; for (int i = c.startOffset() - 1; i >= replace.endOffset(); i--) { char test = testText.charAt(i); if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) { builder.insert(0, test); } else { builder = postfix; postfix.setLength(0); } } String pre = prefix.toString(); String post = postfix.toString(); // Does it bridge? 
if ((pre.length() > 0) && (replace.endOffset() + pre.length()) == c.startOffset()) { String termText = c.toString(); if (requiresMLTokenDuplication) { int position = termText.indexOf("}"); @SuppressWarnings("unused") String language = termText.substring(0, position + 1); String token = termText.substring(position + 1); int oldPositionIncrement = replace.getPositionIncrement(); String replaceTermText = replace.toString(); replace = new org.apache.lucene.analysis.Token(replaceTermText + pre + token, replace.startOffset(), c.endOffset()); replace.setType(replace.type()); replace.setPositionIncrement(oldPositionIncrement); } else { int oldPositionIncrement = replace.getPositionIncrement(); String replaceTermText = replace.toString(); replace = new org.apache.lucene.analysis.Token(replaceTermText + pre + termText, replace.startOffset(), c.endOffset()); replace.setType(replace.type()); replace.setPositionIncrement(oldPositionIncrement); } } else { String termText = c.toString(); if (requiresMLTokenDuplication) { int position = termText.indexOf("}"); String language = termText.substring(0, position + 1); String token = termText.substring(position + 1); String replaceTermText = replace.toString(); org.apache.lucene.analysis.Token last = new org.apache.lucene.analysis.Token( replaceTermText + post, replace.startOffset(), replace.endOffset() + post.length()); last.setType(replace.type()); last.setPositionIncrement(replace.getPositionIncrement()); fixedTokenSequence.add(last); replace = new org.apache.lucene.analysis.Token(language + pre + token, c.startOffset() - pre.length(), c.endOffset()); replace.setType(c.type()); replace.setPositionIncrement(c.getPositionIncrement()); } else { String replaceTermText = replace.toString(); org.apache.lucene.analysis.Token last = new org.apache.lucene.analysis.Token( replaceTermText + post, replace.startOffset(), replace.endOffset() + post.length()); last.setType(replace.type()); last.setPositionIncrement(replace.getPositionIncrement()); fixedTokenSequence.add(last); replace = new org.apache.lucene.analysis.Token(pre + termText, c.startOffset() - pre.length(), c.endOffset()); replace.setType(c.type()); replace.setPositionIncrement(c.getPositionIncrement()); } } } } // finish last if (replace != null) { StringBuilder postfix = new StringBuilder(); if ((replace.endOffset() >= 0) && (replace.endOffset() < testText.length())) { for (int i = replace.endOffset(); i < testText.length(); i++) { char test = testText.charAt(i); if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) { postfix.append(test); } else { break; } } } String post = postfix.toString(); int oldPositionIncrement = replace.getPositionIncrement(); String replaceTermText = replace.toString(); replace = new org.apache.lucene.analysis.Token(replaceTermText + post, replace.startOffset(), replace.endOffset() + post.length()); replace.setType(replace.type()); replace.setPositionIncrement(oldPositionIncrement); fixedTokenSequence.add(replace); } } // rebuild fixed list ArrayList<org.apache.lucene.analysis.Token> fixed = new ArrayList<org.apache.lucene.analysis.Token>(); for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) { for (org.apache.lucene.analysis.Token token : tokenSequence) { fixed.add(token); } } // reorder by start position and increment Collections.sort(fixed, new Comparator<org.apache.lucene.analysis.Token>() { public int compare(Token o1, Token o2) { int dif = o1.startOffset() - o2.startOffset(); if (dif != 0) { return dif; } else { return 
o1.getPositionIncrement() - o2.getPositionIncrement(); } } }); // make sure we remove any tokens we have duplicated @SuppressWarnings("rawtypes") OrderedHashSet unique = new OrderedHashSet(); unique.addAll(fixed); fixed = new ArrayList<org.apache.lucene.analysis.Token>(unique); list = fixed; // add any missing locales back to the tokens if (localePrefix.length() > 0) { for (int j = 0; j < list.size(); j++) { org.apache.lucene.analysis.Token currentToken = list.get(j); String termText = currentToken.toString(); currentToken.setEmpty(); currentToken.append(localePrefix + termText); } } SchemaField sf = schema.getField(field); TokenizerChain tokenizerChain = (sf.getType().getQueryAnalyzer() instanceof TokenizerChain) ? ((TokenizerChain) sf.getType().getQueryAnalyzer()) : null; boolean isShingled = false; if (tokenizerChain != null) { for (TokenFilterFactory factory : tokenizerChain.getTokenFilterFactories()) { if (factory instanceof ShingleFilterFactory) { isShingled = true; break; } } } AlfrescoAnalyzerWrapper analyzerWrapper = (sf.getType() .getQueryAnalyzer() instanceof AlfrescoAnalyzerWrapper) ? ((AlfrescoAnalyzerWrapper) sf.getType().getQueryAnalyzer()) : null; if (analyzerWrapper != null) { // assume if there are no term positions it is shingled .... isShingled = true; } boolean forceConjuncion = rerankPhase == RerankPhase.QUERY_PHASE; if (list.size() == 0) return null; else if (list.size() == 1) { nextToken = list.get(0); String termText = nextToken.toString(); if (!isNumeric && (termText.contains("*") || termText.contains("?"))) { return newWildcardQuery(new Term(field, termText)); } else { return newTermQuery(new Term(field, termText)); } } else { if (severalTokensAtSamePosition) { if (positionCount == 1) { // no phrase query: BooleanQuery q = newBooleanQuery(true); for (int i = 0; i < list.size(); i++) { Query currentQuery; nextToken = list.get(i); String termText = nextToken.toString(); if (termText.contains("*") || termText.contains("?")) { currentQuery = newWildcardQuery(new Term(field, termText)); } else { currentQuery = newTermQuery(new Term(field, termText)); } q.add(currentQuery, BooleanClause.Occur.SHOULD); } return q; } else if (forceConjuncion) { BooleanQuery or = new BooleanQuery(); for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) { BooleanQuery and = new BooleanQuery(); for (int i = 0; i < tokenSequence.size(); i++) { nextToken = (org.apache.lucene.analysis.Token) tokenSequence.get(i); String termText = nextToken.toString(); Term term = new Term(field, termText); if ((termText != null) && (termText.contains("*") || termText.contains("?"))) { org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery( term); and.add(wildQuery, Occur.MUST); } else { TermQuery termQuery = new TermQuery(term); and.add(termQuery, Occur.MUST); } } if (and.clauses().size() > 0) { or.add(and, Occur.SHOULD); } } return or; } // shingle else if (sf.omitPositions() && isShingled) { ArrayList<org.apache.lucene.analysis.Token> nonContained = getNonContained(list); Query currentQuery; BooleanQuery weakPhrase = new BooleanQuery(); for (org.apache.lucene.analysis.Token shingleToken : nonContained) { String termText = shingleToken.toString(); Term term = new Term(field, termText); if ((termText != null) && (termText.contains("*") || termText.contains("?"))) { currentQuery = new org.apache.lucene.search.WildcardQuery(term); } else { currentQuery = new TermQuery(term); } weakPhrase.add(currentQuery, Occur.MUST); } return weakPhrase; } 
// Consider if we can use a multi-phrase query (e.g for synonym use rather then WordDelimiterFilterFactory) else if (canUseMultiPhraseQuery(fixedTokenSequences)) { // phrase query: MultiPhraseQuery mpq = newMultiPhraseQuery(); mpq.setSlop(internalSlop); ArrayList<Term> multiTerms = new ArrayList<Term>(); int position = 0; for (int i = 0; i < list.size(); i++) { nextToken = list.get(i); String termText = nextToken.toString(); Term term = new Term(field, termText); if ((termText != null) && (termText.contains("*") || termText.contains("?"))) { throw new IllegalStateException("Wildcards are not allowed in multi phrase anymore"); } else { multiTerms.add(term); } if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) { if (getEnablePositionIncrements()) { mpq.add(multiTerms.toArray(new Term[0]), position); } else { mpq.add(multiTerms.toArray(new Term[0])); } checkTermCount(field, queryText, mpq); multiTerms.clear(); } position += nextToken.getPositionIncrement(); } if (getEnablePositionIncrements()) { if (multiTerms.size() > 0) { mpq.add(multiTerms.toArray(new Term[0]), position); } // else // { // mpq.add(new Term[] { new Term(field, "\u0000") }, position); // } } else { if (multiTerms.size() > 0) { mpq.add(multiTerms.toArray(new Term[0])); } // else // { // mpq.add(new Term[] { new Term(field, "\u0000") }); // } } checkTermCount(field, queryText, mpq); return mpq; } // Word delimiter factory and other odd things generate complex token patterns // Smart skip token sequences with small tokens that generate toomany wildcards // Fall back to the larger pattern // e.g Site1* will not do (S ite 1*) or (Site 1*) if 1* matches too much (S ite1*) and (Site1*) will still be OK // If we skip all (for just 1* in the input) this is still an issue. else { return generateSpanOrQuery(field, fixedTokenSequences); } } else { if (forceConjuncion) { BooleanQuery or = new BooleanQuery(); for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) { BooleanQuery and = new BooleanQuery(); for (int i = 0; i < tokenSequence.size(); i++) { nextToken = (org.apache.lucene.analysis.Token) tokenSequence.get(i); String termText = nextToken.toString(); Term term = new Term(field, termText); if ((termText != null) && (termText.contains("*") || termText.contains("?"))) { org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery( term); and.add(wildQuery, Occur.MUST); } else { TermQuery termQuery = new TermQuery(term); and.add(termQuery, Occur.MUST); } } if (and.clauses().size() > 0) { or.add(and, Occur.SHOULD); } } return or; } else { SpanQuery spanQuery = null; SpanOrQuery atSamePosition = new SpanOrQuery(); int gap = 0; for (int i = 0; i < list.size(); i++) { nextToken = list.get(i); String termText = nextToken.toString(); Term term = new Term(field, termText); if (getEnablePositionIncrements()) { SpanQuery nextSpanQuery; if ((termText != null) && (termText.contains("*") || termText.contains("?"))) { org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery( term); SpanMultiTermQueryWrapper wrapper = new SpanMultiTermQueryWrapper<>(wildQuery); wrapper.setRewriteMethod( new TopTermsSpanBooleanQueryRewrite(topTermSpanRewriteLimit)); nextSpanQuery = wrapper; } else { nextSpanQuery = new SpanTermQuery(term); } if (gap == 0) { atSamePosition.addClause(nextSpanQuery); } else { if (atSamePosition.getClauses().length == 0) { if (spanQuery == null) { spanQuery = nextSpanQuery; } else { spanQuery = new SpanNearQuery(new 
SpanQuery[] { spanQuery, nextSpanQuery }, (gap - 1) + internalSlop, internalSlop < 2); } atSamePosition = new SpanOrQuery(); } else if (atSamePosition.getClauses().length == 1) { if (spanQuery == null) { spanQuery = atSamePosition.getClauses()[0]; } else { spanQuery = new SpanNearQuery( new SpanQuery[] { spanQuery, atSamePosition.getClauses()[0] }, (gap - 1) + internalSlop, internalSlop < 2); } atSamePosition = new SpanOrQuery(); atSamePosition.addClause(nextSpanQuery); } else { if (spanQuery == null) { spanQuery = atSamePosition; } else { spanQuery = new SpanNearQuery(new SpanQuery[] { spanQuery, atSamePosition }, (gap - 1) + internalSlop, internalSlop < 2); } atSamePosition = new SpanOrQuery(); atSamePosition.addClause(nextSpanQuery); } } gap = nextToken.getPositionIncrement(); } else { SpanQuery nextSpanQuery; if ((termText != null) && (termText.contains("*") || termText.contains("?"))) { org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery( term); SpanMultiTermQueryWrapper wrapper = new SpanMultiTermQueryWrapper<>(wildQuery); wrapper.setRewriteMethod( new TopTermsSpanBooleanQueryRewrite(topTermSpanRewriteLimit)); nextSpanQuery = wrapper; } else { nextSpanQuery = new SpanTermQuery(term); } if (spanQuery == null) { spanQuery = new SpanOrQuery(); ((SpanOrQuery) spanQuery).addClause(nextSpanQuery); } else { ((SpanOrQuery) spanQuery).addClause(nextSpanQuery); } } } if (atSamePosition.getClauses().length == 0) { return spanQuery; } else if (atSamePosition.getClauses().length == 1) { if (spanQuery == null) { spanQuery = atSamePosition.getClauses()[0]; } else { spanQuery = new SpanNearQuery( new SpanQuery[] { spanQuery, atSamePosition.getClauses()[0] }, (gap - 1) + internalSlop, internalSlop < 2); } return spanQuery; } else { if (spanQuery == null) { spanQuery = atSamePosition; } else { spanQuery = new SpanNearQuery(new SpanQuery[] { spanQuery, atSamePosition }, (gap - 1) + internalSlop, internalSlop < 2); } return spanQuery; } } } } }
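In the token-sequence bookkeeping above, removeLast() implements a "replace the tail" edit: when a synonym or gram has to be spliced into the last token of a LinkedList<Token> chain, the old tail is popped with newEntry.removeLast() and the merged token is appended in its place. Below is a tiny sketch of that pop-and-replace step, using plain Strings instead of Lucene Token objects; the data is invented for illustration.

import java.util.Arrays;
import java.util.LinkedList;

public class ReplaceLastTokenSketch {
    public static void main(String[] args) {
        // Hypothetical token sequence standing in for the LinkedList<Token> chains built above.
        LinkedList<String> sequence = new LinkedList<String>(Arrays.asList("quick", "brown", "fox"));

        // Pop the old tail with removeLast() and append the merged replacement,
        // the way the parser splices a synonym or gram into the last token.
        String merged = sequence.getLast() + "es";
        sequence.removeLast();
        sequence.add(merged);

        System.out.println(sequence); // [quick, brown, foxes]
    }
}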