List of usage examples for java.util.regex Pattern quote
public static String quote(String s)
From source file:com.ejisto.util.IOUtils.java
/**
 * Converts a path-like string into dotted form by replacing every occurrence
 * of {@code separator} with {@code "."} (e.g. {@code "a/b/c"} with separator
 * {@code "/"} becomes {@code "a.b.c"}).
 *
 * @param in        the input path; must not be {@code null}
 * @param separator the literal separator to replace; must not be {@code null}
 * @return {@code in} with every occurrence of {@code separator} replaced by a dot
 */
private static String translatePath(String in, String separator) {
    // String.replace performs a literal substitution, so there is no need to
    // build a throwaway regex with Pattern.quote, and no risk from
    // regex-special characters in the separator or replacement.
    return in.replace(separator, ".");
}
From source file:org.shredzone.commons.view.manager.ViewPattern.java
/**
 * Compiles a view pattern. Generates a parameter list, a list of expressions for
 * building URLs to this view, and a regular expression for matching URLs against this
 * view pattern.
 *
 * @param pstr
 *            the view pattern
 * @param pattern
 *            {@link StringBuilder} to assemble the regular expression in
 * @param expList
 *            List of {@link Expression} to assemble expressions in
 * @param paramList
 *            List to assemble parameters in
 */
private void compilePattern(String pstr, StringBuilder pattern, List<Expression> expList, List<String> paramList) {
    ExpressionParser parser = new SpelExpressionParser();
    // Position just past the last placeholder match; the text between
    // matches is treated as a fixed (literal) URL part.
    int previous = 0;
    Matcher m = PATH_PART.matcher(pstr);
    while (m.find()) {
        String fixedPart = pstr.substring(previous, m.start());
        // Fixed parts are later wrapped in single quotes to form SpEL string
        // literals (see parseExpression below), so a quote inside them would
        // break the generated expression.
        if (fixedPart.indexOf('\'') >= 0) {
            throw new IllegalArgumentException("path parameters must not contain \"'\"");
        }
        // Group 1 of PATH_PART is the placeholder's SpEL expression text.
        String expressionPart = m.group(1);
        // Literal URL text must be regex-escaped; the placeholder itself
        // matches any sequence of non-slash characters and is captured.
        pattern.append(Pattern.quote(fixedPart));
        pattern.append("([^/]*)");
        paramList.add(expressionPart);
        // For URL building: the fixed part as a SpEL string literal, then the
        // dynamic placeholder expression.
        expList.add(parser.parseExpression('\'' + fixedPart + '\''));
        expList.add(parser.parseExpression(expressionPart));
        previous = m.end();
    }
    // Trailing text after the last placeholder (may be empty).
    String postPart = pstr.substring(previous);
    pattern.append(Pattern.quote(postPart));
    expList.add(parser.parseExpression('\'' + postPart + '\''));
}
From source file:cn.cnic.bigdatalab.flume.sink.mongodb.EventParser.java
/**
 * Builds a Mongo {@link DBObject} from a delimited string according to the
 * field definition's document mapping. Sub-documents consume the remaining
 * delimited values recursively via {@code parseValue}.
 *
 * @param fd       definition carrying the delimiter and the ordered document mapping
 * @param document the raw delimited record
 * @return the populated {@link DBObject}
 * @throws MongoSinkException if no delimiter is configured
 */
private DBObject populateDocument(DocumentFieldDefinition fd, String document) {
    DBObject dbObject = null;
    final String delimiter = fd.getDelimiter();
    if (!StringUtils.isEmpty(delimiter)) {
        // Delimiter is configuration text, not a regex — quote it before splitting.
        String[] documentAsArrray = document.split(Pattern.quote(delimiter));
        dbObject = new BasicDBObject();
        // LinkedHashMap preserves the mapping order, which must line up with
        // the positional values in documentAsArrray.
        Map<String, FieldDefinition> documentMapping = new LinkedHashMap<String, FieldDefinition>(
                fd.getDocumentMapping());
        // i = index of the next unconsumed delimited value.
        int i = 0;
        for (Map.Entry<String, FieldDefinition> documentField : documentMapping.entrySet()) {
            if (DOCUMENT_TYPE.equalsIgnoreCase(documentField.getValue().getType().name())) {
                // Nested document: re-join the remaining values and recurse;
                // then skip as many values as the sub-mapping declared.
                dbObject.put(documentField.getKey(), parseValue(documentField.getValue(), StringUtils.join(
                        Arrays.copyOfRange(documentAsArrray, i, documentAsArrray.length), fd.getDelimiter())));
                i += ((DocumentFieldDefinition) documentField.getValue()).getDocumentMapping().size();
            } else {
                // Scalar field: consume exactly one value.
                dbObject.put(documentField.getKey(), parseValue(documentField.getValue(), documentAsArrray[i++]));
            }
        }
    } else {
        throw new MongoSinkException("Delimiter char must be set");
    }
    return dbObject;
}
From source file:net.sf.yal10n.DetectChangesMojo.java
void createAndSendEmail(DashboardConfiguration config, Repository repo, String projectName, String viewvcDiff, UnifiedDiff unifiedDiff) {//from w ww.j av a 2 s.c o m Properties props = new Properties(); props.put("mail.smtp.host", config.getNotification().getSmtpServer()); props.put("mail.smtp.port", config.getNotification().getSmtpPort()); InternetAddress from; try { from = new InternetAddress(config.getNotification().getMailFrom()); List<Address> recipients = config.getNotification().getRecipientsAddresses(); recipients.addAll(repo.getNotification().getRecipientsAddresses()); String subject = config.getNotification().getSubject().replaceAll(Pattern.quote("{{projectName}}"), projectName); String content = "<html><body>Changes detected in <strong>" + projectName + "</strong><br>" + "<p>See here: <a href=\"" + viewvcDiff + "\">" + viewvcDiff + "</a></p>" + "<br>" + "<strong>Diff output:</strong><br>" + unifiedDiff.asHtmlSnippet() + "</body></html>"; emailer.sendEmail(skipEmail, props, from, recipients, subject, content, projectName); } catch (AddressException e) { throw new RuntimeException(e); } }
From source file:gate.util.reporting.PRTimeReporter.java
/**
 * Stores GATE processing elements and the time taken by them in an in-memory
 * data structure for report generation.
 *
 * @param inputFile
 *            A File handle of the input log file.
 *
 * @return An Object of type LinkedHashMap&lt;String, Object&gt; containing the
 *         processing elements (with time in milliseconds) in hierarchical
 *         structure. Null if there was an error.
 */
@Override
public Object store(File inputFile) throws BenchmarkReportInputFileFormatException {
    LinkedHashMap<String, Object> globalStore = new LinkedHashMap<String, Object>();
    long fromPos = 0;
    RandomAccessFile in = null;
    try {
        // When a logical-start marker is configured, only the tail of the log
        // (from that marker onward) is processed.
        if (getLogicalStart() != null) {
            fromPos = tail(inputFile, FILE_CHUNK_SIZE);
        }
        in = new RandomAccessFile(inputFile, "r");
        if (getLogicalStart() != null) {
            in.seek(fromPos);
        }
        ArrayList<String> startTokens = new ArrayList<String>();
        String logEntry;
        String docName = null;
        // Benchmark log line shape: "<start> <duration> <benchmarkId> <class> {features}".
        Pattern pattern = Pattern.compile("(\\d+) (\\d+) (.*) (.*) \\{(.*)\\}");
        while ((logEntry = in.readLine()) != null) {
            Matcher matcher = pattern.matcher(logEntry);
            // Skip the statistics for the event documentLoaded
            if (logEntry.matches(".*documentLoaded.*"))
                continue;
            if (logEntry.matches(".*START.*")) {
                String[] splittedStartEntry = logEntry.split("\\s");
                String startToken = (splittedStartEntry.length > 2) ? splittedStartEntry[2] : null;
                if (startToken == null) {
                    throw new BenchmarkReportInputFileFormatException(
                            getBenchmarkFile().getAbsolutePath() + " is invalid.");
                }
                startTokens.add(startToken);
                // "...Start" markers announce a run; they carry no timing entry.
                if (startToken.endsWith("Start"))
                    continue;
                // Register the element hierarchy with a zero placeholder time.
                organizeEntries(globalStore, startToken.split("\\."), "0");
            }
            // NOTE(review): matcher is never null here (Pattern.matcher never
            // returns null), so this guard is redundant but harmless.
            if (matcher != null) {
                if (matcher.matches()) {
                    if (validateLogEntry(matcher.group(3), startTokens)) {
                        // benchmarkId is dot-separated: pipeline.docName.rest...
                        String[] splittedBIDs = matcher.group(3).split("\\.");
                        if (splittedBIDs.length > 1) {
                            docName = splittedBIDs[1];
                            pipelineNames.add(splittedBIDs[0]);
                        }
                        // NOTE(review): docName may still be null on the first
                        // matching entry (Pattern.quote(null) would NPE), and
                        // the trailing "." concatenated after Pattern.quote is
                        // an unescaped regex dot — confirm against real logs.
                        organizeEntries(globalStore,
                                (matcher.group(3).replaceFirst(Pattern.quote(docName) + ".", "")).split("\\."),
                                matcher.group(2));
                    }
                }
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
        globalStore = null;
    } finally {
        try {
            if (in != null) {
                in.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
            globalStore = null;
        }
    }
    // validEntries is a field updated elsewhere (presumably by validateLogEntry);
    // zero means nothing usable was parsed from the selected log region.
    if (validEntries == 0) {
        if (logicalStart != null) {
            throw new BenchmarkReportInputFileFormatException(
                    "No valid log entries present in " + getBenchmarkFile().getAbsolutePath()
                            + " does not contain a marker named " + logicalStart + ".");
        } else {
            throw new BenchmarkReportInputFileFormatException(
                    "No valid log entries present in " + getBenchmarkFile().getAbsolutePath());
        }
    }
    return globalStore;
}
From source file:de.uni_tuebingen.ub.ixTheo.handler.component.FacetPrefixSortComponent.java
/**
 * Actually run the query: compute facet counts, then — when facet.sort is the
 * custom "prefix" mode — re-score and re-order the facet values of the
 * configured facet field against the query terms, applying offset/limit here
 * (limits were disabled upstream so the full list could be sorted).
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
    if (rb.doFacets) {
        final ModifiableSolrParams params = new ModifiableSolrParams();
        final SolrParams origParams = rb.req.getParams();
        final Iterator<String> iter = origParams.getParameterNamesIterator();
        setCollator(origParams.get("lang"));
        while (iter.hasNext()) {
            final String paramName = iter.next();
            // Deduplicate the list with LinkedHashSet, but _only_ for facet
            // params.
            if (!paramName.startsWith(FacetParams.FACET)) {
                params.add(paramName, origParams.getParams(paramName));
                continue;
            }
            final HashSet<String> deDupe = new LinkedHashSet<>(Arrays.asList(origParams.getParams(paramName)));
            params.add(paramName, deDupe.toArray(new String[deDupe.size()]));
        }
        final SimplePrefixSortFacets facets = new SimplePrefixSortFacets(rb.req, rb.getResults().docSet, params,
                rb);
        final NamedList<Object> counts = org.apache.solr.handler.component.FacetComponent
                .getFacetCounts(facets);
        final String[] pivots = params.getParams(FacetParams.FACET_PIVOT);
        if (pivots != null && pivots.length > 0) {
            PivotFacetProcessor pivotProcessor = new PivotFacetProcessor(rb.req, rb.getResults().docSet, params,
                    rb);
            SimpleOrderedMap<List<NamedList<Object>>> v = pivotProcessor.process(pivots);
            if (v != null) {
                counts.add(PIVOT_KEY, v);
            }
        }
        // Check whether we have to reorder out results
        // according to prefix
        final String sort = params.get(FacetParams.FACET_SORT);
        if (FacetPrefixSortParams.FACET_SORT_PREFIX.equals(sort)) {
            // Determine a score relative to the original query
            // Determine the query and make it compatible with our metric
            // class by splitting the single terms
            String[] queryTerms = params.getParams(CommonParams.Q);
            final Collection<String> queryTermsCollection = new ArrayList<>();
            for (String s : queryTerms) {
                // Split at whitespace except we have a quoted term
                Matcher matcher = WHITE_SPACES_WITH_QUOTES_SPLITTING_PATTERN.matcher(s);
                while (matcher.find()) {
                    // Strip surrounding double quotes from quoted terms.
                    queryTermsCollection.add(matcher.group().replaceAll("^\"|\"$", ""));
                }
            }
            // In some contexts, i.e. in KWC that are derived from ordinary
            // keywords or if wildcards occur, also add all the query terms
            // as a single phrase term with stripped wildcards
            StringBuilder sb = new StringBuilder();
            for (String s : queryTermsCollection) {
                s = s.replace("*", "");
                sb.append(s);
                sb.append(" ");
            }
            queryTermsCollection.add(sb.toString().trim());
            final ArrayList<String> queryList = new ArrayList<>(queryTermsCollection);
            final String facetfield = params.get(FacetParams.FACET_FIELD);
            // Get the current facet entry and make it compatible with our
            // metric class. "facet_fields" itself contains a NamedList with
            // the facet.field as key
            final NamedList<Object> facetFieldsNamedList = (NamedList<Object>) counts.get("facet_fields");
            final NamedList<Object> facetFields = (NamedList<Object>) facetFieldsNamedList.get(facetfield);
            final List<Entry<Entry<String, Object>, Double>> facetPrefixListScored = new ArrayList<>();
            for (final Entry<String, Object> entry : facetFields) {
                final String facetTerms = entry.getKey();
                // Split up each KWC and calculate the scoring. The lookbehind
                // splits on '/' only when it is not escaped by a backslash.
                ArrayList<String> facetList = new ArrayList<>(
                        Arrays.asList(facetTerms.split("(?<!" + Pattern.quote("\\") + ")/")));
                // For usability reasons sort the result facets according to
                // the order of the search
                facetList = KeywordSort.sortToReferenceChain(queryList, facetList);
                final double score = KeywordChainMetric.calculateSimilarityScore(queryList, facetList);
                // Collect the result in a sorted list and throw away garbage
                if (score > 0) {
                    String facetTermsSorted = StringUtils.join(facetList, "/");
                    Map.Entry<String, Object> sortedEntry = new AbstractMap.SimpleEntry<>(facetTermsSorted,
                            entry.getValue());
                    facetPrefixListScored.add(new AbstractMap.SimpleEntry<>(sortedEntry, score));
                }
            }
            Collections.sort(facetPrefixListScored, ENTRY_COMPARATOR);
            // Extract all the values wrap it back to NamedList again and
            // replace in the original structure
            facetFieldsNamedList.clear();
            NamedList<Object> facetNamedListSorted = new NamedList<>();
            // We had to disable all limits and offsets sort according.
            // Handle this accordingly now
            int offset = (params.getInt(FacetParams.FACET_OFFSET) != null) ? params.getInt(FacetParams.FACET_OFFSET)
                    : 0;
            int limit = (params.getInt(FacetParams.FACET_LIMIT) != null) ? params.getInt(FacetParams.FACET_LIMIT)
                    : 100;
            // Strip uneeded elements
            int s = facetPrefixListScored.size();
            int off = (offset < s) ? offset : 0;
            limit = (limit < 0) ? s : limit; // Handle a negative limit
                                             // param, i.e. unlimited results
            int lim = (offset + limit <= s) ? (offset + limit) : s;
            final List<Entry<Entry<String, Object>, Double>> facetPrefixListScoredTruncated = facetPrefixListScored
                    .subList(off, lim);
            for (Entry<Entry<String, Object>, Double> e : facetPrefixListScoredTruncated) {
                facetNamedListSorted.add(e.getKey().getKey(), e.getKey().getValue());
            }
            facetFieldsNamedList.add(facetfield, facetNamedListSorted);
            NamedList<Object> countList = new NamedList<>();
            countList.add("count", facetPrefixListScored.size());
            facetFieldsNamedList.add(facetfield + "-count", countList);
            counts.remove("facet_fields");
            counts.add("facet_fields", facetFieldsNamedList);
        }
        rb.rsp.add("facet_counts", counts);
    }
}
From source file:org.loklak.api.iot.GeoJsonPushServlet.java
/**
 * For each member m in properties, if it exists in mapRules, perform these conversions :
 * - m:c -&gt; keep value, change key m to c
 * - m:c.d -&gt; insert/update json object of key c with a value {d : value}
 *
 * @param mapRules   mapping from original property key to one or more target field
 *                   paths (dot-separated paths create nested objects)
 * @param properties source JSON object whose values are copied
 * @return mappedProperties — a new JSON object; properties without a rule are dropped
 */
private JSONObject convertMapRulesProperties(Map<String, List<String>> mapRules, JSONObject properties) {
    // NOTE(review): JSONObject(boolean) is not a stock org.json constructor —
    // presumably a project variant (ordered?); confirm its semantics.
    JSONObject root = new JSONObject(true);
    for (String key : properties.keySet()) {
        if (mapRules.containsKey(key)) {
            // One source property may fan out to several target fields.
            for (String newField : mapRules.get(key)) {
                if (newField.contains(".")) {
                    // Dot is a regex metacharacter, so quote it for split.
                    String[] deepFields = newField.split(Pattern.quote("."));
                    // Walk/create the nested object chain down to the leaf.
                    JSONObject currentLevel = root;
                    for (int lvl = 0; lvl < deepFields.length; lvl++) {
                        if (lvl == deepFields.length - 1) {
                            // Leaf: store the original property value.
                            currentLevel.put(deepFields[lvl], properties.get(key));
                        } else {
                            // Intermediate: create the container if absent.
                            // NOTE(review): stock org.json get() throws on a
                            // missing key instead of returning null — verify
                            // this JSONObject returns null here.
                            if (currentLevel.get(deepFields[lvl]) == null) {
                                JSONObject tmp = new JSONObject();
                                currentLevel.put(deepFields[lvl], tmp);
                            }
                            currentLevel = (JSONObject) currentLevel.get(deepFields[lvl]);
                        }
                    }
                } else {
                    // Simple rename: same value under the new key.
                    root.put(newField, properties.get(key));
                }
            }
        }
    }
    return root;
}
From source file:org.yamj.core.service.mediaimport.FilenameScanner.java
/**
 * Initializes the scanner from configuration properties: recognized file
 * extensions, language/episode-title switches, the optional parent-directory
 * regex, skip/version/extra keyword patterns, and the video source keyword map.
 */
public FilenameScanner() {
    // resolve extensions
    videoExtensions = StringTools.tokenize(PropertyTools.getProperty("filename.scanner.video.extensions",
            "avi,divx,xvid,mkv,wmv,m2ts,ts,rm,qt,iso,vob,mpg,mov,mp4,m1v,m2v,m4v,m2p,top,trp,m2t,mts,asf,rmp4,img,mk3d,rar,001"),
            ",;|");
    subtitleExtensions = StringTools.tokenize(
            PropertyTools.getProperty("filename.scanner.subtitle.extensions", "srt,sub,ssa,smi,pgs"), ",;|");
    imageExtensions = StringTools.tokenize(
            PropertyTools.getProperty("filename.scanner.image.extensions", "jpg,jpeg,gif,bmp,png"), ",;|");
    // other properties
    languageDetection = PropertyTools.getBooleanProperty("filename.scanner.language.detection", Boolean.TRUE);
    skipEpisodeTitle = PropertyTools.getBooleanProperty("filename.scanner.skip.episodeTitle", Boolean.FALSE);
    // parent patterns: the regex is only honored when non-blank, otherwise
    // the feature is forced off regardless of the boolean property.
    useParentRegex = PropertyTools.getBooleanProperty("filename.scanner.useParentRegex", Boolean.FALSE);
    String patternString = PropertyTools.getProperty("filename.scanner.parentRegex", "");
    if (StringUtils.isNotBlank(patternString)) {
        useParentPattern = PatternUtils.ipatt(patternString);
    } else {
        useParentRegex = Boolean.FALSE;
    }
    // build the skip patterns: plain keywords are quoted (treated literally),
    // with case sensitivity switching the pattern factory.
    boolean caseSensitive = PropertyTools.getBooleanProperty("filename.scanner.skip.caseSensitive",
            Boolean.TRUE);
    for (String token : tokenizeToStringArray(PropertyTools.getProperty("filename.scanner.skip.keywords", ""),
            ",;| ")) {
        if (caseSensitive) {
            skipPatterns.add(PatternUtils.wpatt(Pattern.quote(token)));
        } else {
            skipPatterns.add(PatternUtils.iwpatt(Pattern.quote(token)));
        }
    }
    // regex skip keywords are compiled as-is (no quoting).
    caseSensitive = PropertyTools.getBooleanProperty("filename.scanner.skip.caseSensitive.regex", Boolean.TRUE);
    for (String token : tokenizeToStringArray(
            PropertyTools.getProperty("filename.scanner.skip.keywords.regex", ""), ",;| ")) {
        if (caseSensitive) {
            skipPatterns.add(PatternUtils.patt(token));
        } else {
            skipPatterns.add(PatternUtils.ipatt(token));
        }
    }
    // build version keywords pattern; spaces match any word delimiter.
    for (String token : tokenizeToStringArray(PropertyTools.getProperty("filename.scanner.version.keywords",
            "director's cut,directors cut,extended cut,final cut,remastered,extended version,special edition"),
            ",;|")) {
        movieVersionPatterns.add(
                PatternUtils.iwpatt(token.replace(" ", PatternUtils.WORD_DELIMITERS_MATCH_PATTERN.pattern())));
    }
    // build extra keywords pattern (matched inside square brackets).
    for (String token : tokenizeToStringArray(
            PropertyTools.getProperty("filename.scanner.extra.keywords", "trailer,extra,bonus"), ",;|")) {
        extraPatterns.add(PatternUtils.pattInSBrackets(Pattern.quote(token)));
    }
    // set source keywords
    KeywordMap sourceKeywords = PropertyTools.getKeywordMap("filename.scanner.source.keywords",
            "HDTV,PDTV,DVDRip,DVDSCR,DSRip,CAM,R5,LINE,HD2DVD,DVD,DVD5,DVD9,HRHDTV,MVCD,VCD,TS,VHSRip,BluRay,BDRip,HDDVD,D-THEATER,SDTV");
    videoSourceMap.putAll(sourceKeywords.getKeywords(), sourceKeywords);
}
From source file:com.googlecode.jdeltasync.DeltaSyncClient.java
/**
 * Logs in using the specified username and password. Returns a
 * {@link IDeltaSyncSession} object on successful authentication.
 *
 * @param username the username.
 * @param password the password.
 * @return the session.
 * @throws AuthenticationException if authentication fails.
 * @throws DeltaSyncException on errors returned by the server.
 * @throws IOException on communication errors.
 */
@Override
public IDeltaSyncSession login(String username, String password)
        throws AuthenticationException, DeltaSyncException, IOException {
    if (username == null) {
        throw new NullPointerException("username");
    }
    if (password == null) {
        throw new NullPointerException("password");
    }
    // WS-Security timestamps: token valid for 5 minutes, formatted as UTC.
    // SimpleDateFormat is created per call, so thread-safety is not an issue.
    SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
    format.setTimeZone(TimeZone.getTimeZone("UTC"));
    Date created = new Date();
    Date expires = new Date(created.getTime() + 5 * 60 * 1000);
    // Fill the SOAP login template with credentials and timestamps.
    Document request = XmlUtil.parse(getClass().getResourceAsStream("login-request.xml"));
    Element elSecurity = XmlUtil.getElement(request, "/s:Envelope/s:Header/wsse:Security");
    XmlUtil.setTextContent(elSecurity, "wsse:UsernameToken/wsse:Username", username);
    XmlUtil.setTextContent(elSecurity, "wsse:UsernameToken/wsse:Password", password);
    XmlUtil.setTextContent(elSecurity, "wsu:Timestamp/wsu:Created", format.format(created));
    XmlUtil.setTextContent(elSecurity, "wsu:Timestamp/wsu:Expires", format.format(expires));
    IDeltaSyncSession session = new DeltaSyncSession(username, password);
    if (session.getLogger().isDebugEnabled()) {
        // Mask the password before logging. Literal String.replace does the
        // same job as replaceAll(Pattern.quote(password), ...) without the
        // regex machinery.
        session.getLogger().debug("Sending login request: {}",
                XmlUtil.toString(request, false).replace(password, "******"));
    }
    Document response = post(session, LOGIN_BASE_URI, LOGIN_USER_AGENT, "application/soap+xml", request,
            new UriCapturingResponseHandler<Document>() {
                public Document handle(URI uri, HttpResponse response) throws DeltaSyncException, IOException {
                    return XmlUtil.parse(response.getEntity().getContent());
                }
            });
    if (session.getLogger().isDebugEnabled()) {
        session.getLogger().debug("Received login response: {}", XmlUtil.toString(response, false));
    }
    // A SOAP fault means the server rejected the request outright.
    if (XmlUtil.hasElement(response, "/s:Envelope/s:Body/s:Fault")) {
        throw new AuthenticationException(
                XmlUtil.getTextContent(response, "/s:Envelope/s:Body/s:Fault/s:Reason/s:Text"));
    }
    String ticket = XmlUtil.getTextContent(response,
            "/s:Envelope/s:Body/wst:RequestSecurityTokenResponseCollection/"
                    + "wst:RequestSecurityTokenResponse/wst:RequestedSecurityToken/wsse:BinarySecurityToken");
    if (ticket == null) {
        // No ticket: try to extract the Passport failure details for a
        // more specific exception before giving up.
        String flowUrl = XmlUtil.getTextContent(response,
                "/s:Envelope/s:Body/wst:RequestSecurityTokenResponseCollection/"
                        + "wst:RequestSecurityTokenResponse/psf:pp/psf:flowurl");
        String requestStatus = XmlUtil.getTextContent(response,
                "/s:Envelope/s:Body/wst:RequestSecurityTokenResponseCollection/"
                        + "wst:RequestSecurityTokenResponse/psf:pp/psf:reqstatus");
        String errorStatus = XmlUtil.getTextContent(response,
                "/s:Envelope/s:Body/wst:RequestSecurityTokenResponseCollection/"
                        + "wst:RequestSecurityTokenResponse/psf:pp/psf:errorstatus");
        if (flowUrl != null || requestStatus != null || errorStatus != null) {
            throw new AuthenticationException(flowUrl, requestStatus, errorStatus);
        }
        // Fixed typo in the user-facing message ("Uknown" -> "Unknown").
        throw new AuthenticationException("Unknown authentication failure");
    }
    session.setTicket(ticket);
    session.setBaseUri(DS_BASE_URI);
    return session;
}
From source file:net.oauth.jsontoken.JsonTokenParser.java
/** * @param tokenString The original encoded representation of a JWT * @return Three components of the JWT as an array of strings *///from ww w.java2 s . c om private String[] splitTokenString(String tokenString) { String[] pieces = tokenString.split(Pattern.quote(JsonTokenUtil.DELIMITER)); if (pieces.length != 3) { throw new IllegalStateException("Expected JWT to have 3 segments separated by '" + JsonTokenUtil.DELIMITER + "', but it has " + pieces.length + " segments"); } return pieces; }