List of usage examples for the java.util.ArrayList method contains(Object o)
public boolean contains(Object o)
From source file:com.vangent.hieos.services.xds.repository.transactions.ProvideAndRegisterDocumentSet.java
/** * * @param pnr/*w ww. ja v a 2 s. c om*/ * @param m * @throws com.vangent.hieos.xutil.exception.XDSMissingDocumentException * @throws com.vangent.hieos.xutil.exception.XDSMissingDocumentMetadataException */ private void validate_docs_and_metadata_b(OMElement pnr, Metadata m) throws XDSMissingDocumentException, XDSMissingDocumentMetadataException { ArrayList<OMElement> docs = MetadataSupport.childrenWithLocalName(pnr, "Document"); ArrayList<String> doc_ids = new ArrayList<String>(); for (OMElement doc : docs) { String id = doc.getAttributeValue(MetadataSupport.id_qname); // if id == null or id =="" doc_ids.add(id); } ArrayList<String> eo_ids = m.getExtrinsicObjectIds(); for (String id : eo_ids) { if (!doc_ids.contains(id)) { throw new XDSMissingDocumentException("Document with id " + id + " is missing"); } } for (String id : doc_ids) { if (!eo_ids.contains(id)) { throw new XDSMissingDocumentMetadataException("XDSDocumentEntry with id " + id + " is missing"); } } }
From source file:fr.eolya.utils.http.HttpUtils.java
/** * Extract link in html string according to depth parameter * if depth = 0 : extract only redirection or iframe or framset urls links * if depth = 1 : extract only standard urls links (<a href='..'> * if depth = 2 : extract all links/*from w ww .j a v a2s. c o m*/ * * @param rawPage the input html string * @param depth the type of links to be extracted * @return the extracted urls in a String List * @throws IOException */ public static List<String> extractLinks(String rawPage, int depth) throws IOException { final ArrayList<String> list = new ArrayList<String>(); HtmlCleaner cleaner = new HtmlCleaner(); //CleanerProperties props = cleaner.getProperties(); //props.setXXX(...); TagNode node = cleaner.clean(rawPage); TagNode[] myNodes; if (depth == 1 || depth == 2) { // <a href= myNodes = node.getElementsByName("a", true); for (int i = 0; i < myNodes.length; i++) { String link = myNodes[i].getAttributeByName("href"); if (link != null) { link = link.trim(); if (link != null && !"".equals(link)) { if (isValidUrl(link)) if (!list.contains(link)) list.add(link); } } } // <area href= myNodes = node.getElementsByName("area", true); for (int i = 0; i < myNodes.length; i++) { String link = myNodes[i].getAttributeByName("href"); if (link != null && !"".equals(link)) if (isValidUrl(link)) if (!list.contains(link)) list.add(link); } } if (depth == 0 || depth == 2) { // <frame src= myNodes = node.getElementsByName("frame", true); for (int i = 0; i < myNodes.length; i++) { String link = myNodes[i].getAttributeByName("src"); if (link != null && !"".equals(link)) if (isValidUrl(link)) if (!list.contains(link)) list.add(link); } // <iframe src= myNodes = node.getElementsByName("iframe", true); for (int i = 0; i < myNodes.length; i++) { String link = myNodes[i].getAttributeByName("src"); if (link != null && !"".equals(link)) if (isValidUrl(link)) if (!list.contains(link)) list.add(link); } // <meta http-equiv="refresh" content= myNodes = node.getElementsByName("meta", true); for 
(int i = 0; i < myNodes.length; i++) { String equiv = myNodes[i].getAttributeByName("http-equiv"); if ((equiv != null) && (equiv.equalsIgnoreCase("refresh"))) { String link = myNodes[i].getAttributeByName("content"); if (link != null && !"".equals(link)) { if (link.indexOf("=") > 0) { link = link.substring(link.indexOf("=") + 1); if (!list.contains(link)) list.add(link); } } } } // Look for embeded flash // <param name="movie" value="..." myNodes = node.getElementsByName("param", true); for (int i = 0; i < myNodes.length; i++) { String name = myNodes[i].getAttributeByName("name"); if ("movie".equals(name)) { String link = myNodes[i].getAttributeByName("value"); if (!list.contains(link)) list.add(link); } } } // <frame src= (par Jericho parser car HTML Cleaner echoue) MicrosoftConditionalCommentTagTypes.register(); PHPTagTypes.register(); PHPTagTypes.PHP_SHORT.deregister(); // remove PHP short tags for this example otherwise they override processing instructions MasonTagTypes.register(); Source source = new Source(rawPage); source.fullSequentialParse(); if (depth == 0 || depth == 2) { List<Element> linkElements = source.getAllElements(HTMLElementName.FRAME); for (Element linkElement : linkElements) { String link = linkElement.getAttributeValue("src"); if (link != null && !"".equals(link)) if (isValidUrl(link)) if (!list.contains(link)) list.add(link); } } if (depth == 1 || depth == 2) { List<Element> linkElements = source.getAllElements(HTMLElementName.A); for (Element linkElement : linkElements) { String link = linkElement.getAttributeValue("href"); if (link != null && !"".equals(link)) if (isValidUrl(link)) if (!list.contains(link)) list.add(link); /* if (href==null) continue; // A element can contain other tags so need to extract the text from it: String label=linkElement.getContent().getTextExtractor().toString(); System.out.println(label+" <"+href+'>'); */ } } String strPattern = "location[.]href=['\"](.*)['\"]"; Pattern pattern = Pattern.compile(strPattern); 
Matcher matcher = pattern.matcher(rawPage); while (matcher.find()) { try { String url = matcher.group(1); if (url.indexOf("'") != -1) url = url.substring(0, url.indexOf("'")); if (url.indexOf('"') != -1) url = url.substring(0, url.indexOf('"')); if (!list.contains(url)) list.add(url); } catch (Exception e) { } } // Look for location.href='...' // strPattern = "href=['\"](.*)['\"]"; // pattern = Pattern.compile(strPattern); // matcher = pattern.matcher(rawPage); // while (matcher.find()) { // try{ // String url = matcher.group(1); // if (url.indexOf("'")!=-1) // url = url.substring(0, url.indexOf("'")); // if (url.indexOf('"')!=-1) // url = url.substring(0, url.indexOf('"')); // if (!list.contains(url)) // list.add(url); // } // catch (Exception e){} // } if (depth == 0 || depth == 2) { // Look for location.replace("...") strPattern = "location[.]replace\\(['\"](.*)['\"]\\)"; pattern = Pattern.compile(strPattern); matcher = pattern.matcher(rawPage); while (matcher.find()) { try { String url = matcher.group(1); if (url.indexOf("'") != -1) url = url.substring(0, url.indexOf("'")); if (url.indexOf('"') != -1) url = url.substring(0, url.indexOf('"')); if (!list.contains(url)) list.add(url); } catch (Exception e) { } } // Look for window.location='...' strPattern = "window[.]location=['\"](.*)['\"]"; pattern = Pattern.compile(strPattern); matcher = pattern.matcher(rawPage); while (matcher.find()) { try { String url = matcher.group(1); if (url.indexOf("'") != -1) url = url.substring(0, url.indexOf("'")); if (url.indexOf('"') != -1) url = url.substring(0, url.indexOf('"')); if (!list.contains(url)) list.add(url); } catch (Exception e) { } } } return list; }
From source file:com.globalsight.util.file.XliffFileUtil.java
/**
 * Post-processes exported target pages whose source file was an XLZ archive:
 * for each XLIFF target page whose matching source-side .xlz exists, copies the
 * non-XLIFF files that were extracted from the original XLZ into the exported
 * target directory, then re-zips each collected target directory into a new
 * .xlz file. No-op when the workflow is null or has no target pages.
 *
 * @param p_workflow the workflow whose target pages are examined
 *
 * @author Vincent Yan, 2011/01/27
 * @version 1.1
 * @since 8.1
 */
public static void processXLZFiles(Workflow p_workflow) {
    if (p_workflow == null || p_workflow.getAllTargetPages().size() == 0)
        return;
    TargetPage tp = null;
    String externalId = "";
    String tmp = "", exportDir = "";
    String sourceFilename = "";
    String sourceDir = "", targetDir = "";
    File sourceFile = null;
    File sourcePath = null, targetPath = null;
    // Target directories that must be zipped back into .xlz files at the end.
    ArrayList<String> xlzFiles = new ArrayList<String>();
    try {
        Vector<TargetPage> targetPages = p_workflow.getAllTargetPages();
        String baseCxeDocDir = AmbFileStoragePathUtils.getCxeDocDirPath().concat(File.separator);
        Job job = p_workflow.getJob();
        String companyId = String.valueOf(job.getCompanyId());
        String companyName = CompanyWrapper.getCompanyNameById(companyId);
        // Non-super companies store documents under a company-name subdirectory.
        if (CompanyWrapper.SUPER_COMPANY_ID.equals(CompanyWrapper.getCurrentCompanyId())
                && !CompanyWrapper.SUPER_COMPANY_ID.equals(companyId)) {
            baseCxeDocDir += companyName + File.separator;
        }
        int index = -1;
        // External ids already handled, so each source file is processed once.
        ArrayList<String> processed = new ArrayList<String>();
        for (int i = 0; i < targetPages.size(); i++) {
            tp = (TargetPage) targetPages.get(i);
            externalId = FileUtil.commonSeparator(tp.getSourcePage().getExternalPageId());
            index = externalId.lastIndexOf(SEPARATE_FLAG + File.separator);
            if (index != -1) {
                tmp = externalId.substring(0, index);
                sourceFilename = baseCxeDocDir + tmp;
                sourceFile = new File(sourceFilename);
                if (sourceFile.exists() && sourceFile.isFile()) {
                    // Current file is a separated file from a big Xliff file
                    // with multiple <File> tags; fold it back to its parent id.
                    externalId = tmp;
                }
            }
            if (processed.contains(externalId))
                continue;
            else
                processed.add(externalId);
            if (isXliffFile(externalId)) {
                tmp = externalId.substring(0, externalId.lastIndexOf(File.separator));
                sourceFilename = baseCxeDocDir + tmp + XliffFileUtil.XLZ_EXTENSION;
                sourceFile = new File(sourceFilename);
                if (sourceFile.exists() && sourceFile.isFile()) {
                    // Source file is in XLZ format.
                    exportDir = tp.getExportSubDir();
                    if (exportDir.startsWith("\\") || exportDir.startsWith("/"))
                        exportDir = exportDir.substring(1);
                    targetDir = baseCxeDocDir + exportDir + tmp.substring(tmp.indexOf(File.separator));
                    if (!xlzFiles.contains(targetDir))
                        xlzFiles.add(targetDir);
                    // Get exported target path.
                    targetPath = new File(targetDir);
                    // Get source path.
                    sourceDir = baseCxeDocDir + tmp;
                    sourcePath = new File(sourceDir);
                    // Copy all files extracted from the XLZ file from the source
                    // path to the exported target path. Because Xliff files can be
                    // exported by GS automatically, ignore them and just copy the
                    // other files to the target path.
                    File[] files = sourcePath.listFiles();
                    for (File f : files) {
                        if (f.isDirectory())
                            continue;
                        if (isXliffFile(f.getAbsolutePath()))
                            continue;
                        org.apache.commons.io.FileUtils.copyFileToDirectory(f, targetPath);
                    }
                }
            }
        }
        // Generate the exported XLZ file for each collected target directory,
        // skipping directories that are missing or already zipped.
        for (int i = 0; i < xlzFiles.size(); i++) {
            targetDir = xlzFiles.get(i);
            targetPath = new File(targetDir);
            File xlzFile = new File(targetDir + XLZ_EXTENSION);
            if (!targetPath.exists() || xlzFile.exists()) {
                continue;
            }
            ZipIt.addEntriesToZipFile(xlzFile, targetPath.listFiles(), true, "");
        }
    } catch (Exception e) {
        // Best-effort: failures are logged, not propagated to the caller.
        logger.error("Error in WorkflowManagerLocal.processXLZFiles. ");
        logger.error(e.getMessage(), e);
    }
}
From source file:de.thkwalter.et.ortskurve.OrtskurveModellTest.java
/** * Test der Methode {@link OrtskurveModell#randpunkteOrtskurveZusammenstellen(Ortskurve)}. * // ww w . java2 s .c o m * @throws SecurityException * @throws NoSuchMethodException * @throws InvocationTargetException * @throws IllegalArgumentException * @throws IllegalAccessException */ @Test public void testRandpunkteOrtskurveZusammenstellen() throws NoSuchMethodException, SecurityException, IllegalAccessException, IllegalArgumentException, InvocationTargetException { // Die zu testende Methode wird ausgefhrt. Method methode = OrtskurveModell.class.getDeclaredMethod("randpunkteOrtskurveZusammenstellen", Ortskurve.class); methode.setAccessible(true); @SuppressWarnings("unchecked") ArrayList<Vector2D> randpunkteOrtskurve = (ArrayList<Vector2D>) methode.invoke(this.ortskurveModell, this.ortskurve); // Es wird berprft, ob die Liste die korrekten Randpunkte enthlt. assertEquals(4, randpunkteOrtskurve.size()); assertTrue(randpunkteOrtskurve.contains(new Vector2D(0, 0))); assertTrue(randpunkteOrtskurve.contains(new Vector2D(2, 0))); assertTrue(randpunkteOrtskurve.contains(new Vector2D(1, 1))); assertTrue(randpunkteOrtskurve.contains(new Vector2D(1, -1))); }
From source file:com.evanbelcher.DrillBook.display.DBMenuBar.java
/** * Checks to make sure that the current page has no duplicate dots */// w ww . j a va 2 s.c o m private boolean checkNoDuplicates() { ArrayList<String> names = new ArrayList<>(); ArrayList<String> badNames = new ArrayList<>(); PointConcurrentHashMap<Point, String> dots = Main.getCurrentPage().getDots(); for (String s : dots.values()) { if (!names.contains(s)) names.add(s); else badNames.add(s); } if (!badNames.isEmpty()) { String str = "The following players have more than one dot on the page:\n"; for (String s : badNames) str += s + "\n"; str += "\nTo data from the first page to the second page, navigate to the second page and click \"Play\"."; JOptionPane.showMessageDialog(this, str.trim(), "Conflicts!", JOptionPane.ERROR_MESSAGE); return false; } return checkNoDuplicates(Main.getState().getCurrentPage() - 1); }
From source file:com.facebook.login.GetTokenLoginMethodHandler.java
void getTokenCompleted(LoginClient.Request request, Bundle result) { if (getTokenClient != null) { getTokenClient.setCompletedListener(null); }/*www. ja v a 2 s. c o m*/ getTokenClient = null; loginClient.notifyBackgroundProcessingStop(); if (result != null) { ArrayList<String> currentPermissions = result.getStringArrayList(NativeProtocol.EXTRA_PERMISSIONS); Set<String> permissions = request.getPermissions(); if ((currentPermissions != null) && ((permissions == null) || currentPermissions.containsAll(permissions))) { // We got all the permissions we needed, so we can complete the auth now. complete(request, result); return; } // We didn't get all the permissions we wanted, so update the request with just the // permissions we still need. Set<String> newPermissions = new HashSet<String>(); for (String permission : permissions) { if (!currentPermissions.contains(permission)) { newPermissions.add(permission); } } if (!newPermissions.isEmpty()) { addLoggingExtra(LoginLogger.EVENT_EXTRAS_NEW_PERMISSIONS, TextUtils.join(",", newPermissions)); } request.setPermissions(newPermissions); } loginClient.tryNextHandler(); }
From source file:gov.nih.nci.ispy.web.ajax.DynamicReportGenerator.java
/**
 * Generates (or re-renders) an HTML report for the finding cached under {@code key}
 * and stores the resulting XHTML in the session under {@code key + "_xhtml"}.
 * The XML for the finding is (re)generated on every call; a session-scoped
 * "xmlJobs" list tracks keys with generation in flight.
 *
 * NOTE(review): on any rendering failure only a generic error string is stored;
 * the exception itself is not logged.
 */
public void generateDynamicReport(String key, Map<String, String> params, String stylesheet) {
    String html = new String();
    HttpSession session = ExecutionContext.get().getSession(false);
    PresentationTierCache ptc = CacheFactory.getPresentationTierCache();
    BusinessTierCache btc = CacheFactory.getBusinessTierCache();
    HttpServletRequest request = ExecutionContext.get().getHttpServletRequest();
    // HttpServletResponse response = ExecutionContext.get().getHttpServletResponse();
    // lets hold a list of xml generating jobs, so we dont keep kicking off the same job
    ArrayList jobs = session.getAttribute("xmlJobs") != null ? (ArrayList) session.getAttribute("xmlJobs")
            : new ArrayList();
    // only generate XML if its not already cached...leave off for debug
    // RCL - remove this constraint for now, to avoid caching for tasks with the same key/id
    // if(ptc.getPersistableObjectFromSessionCache(session.getId(), key) == null && !jobs.contains(key)) {
    Object o = btc.getObjectFromSessionCache(session.getId(), key);
    Finding finding = (Finding) o;
    // generate the XML and cache it
    ReportGeneratorHelper.generateReportXML(finding);
    if (!jobs.contains(key))
        jobs.add(key);
    session.setAttribute("xmlJobs", jobs);
    // }
    // Render the cached XML (if present) through the stylesheet.
    Object ob = ptc.getPersistableObjectFromSessionCache(session.getId(), key);
    if (ob != null && ob instanceof FindingReportBean) {
        try {
            FindingReportBean frb = (FindingReportBean) ob;
            Document reportXML = (Document) frb.getXmlDoc();
            html = ReportGeneratorHelper.renderReport(params, reportXML, stylesheet);
            // Rendering succeeded: this key's job is done.
            jobs.remove(key);
            session.setAttribute("xmlJobs", jobs);
        } catch (Exception e) {
            html = "Error Generating the report.";
        }
    } else {
        html = "Error generating the report";
    }
    // put the XHTML in the session for reference in presentation...could store in Prescache
    session.setAttribute(key + "_xhtml", html);
    return;
}
From source file:com.marklogic.dom.ElementImpl.java
/**
 * Walks the namespace declarations in scope for this element's node and returns
 * a prefix atom whose URI atom equals {@code uriAtom}, or -1 if none matches.
 *
 * NOTE(review): semantics inferred from the visible code only — confirm against
 * the tree encoding. Prefixes whose URI string is null are collected in
 * {@code ubp} (presumably "unbound prefixes") and skipped when later seen with
 * a matching URI. A matching prefix with a non-null string is remembered as a
 * fallback (first one wins); a matching prefix with a null string is returned
 * immediately, but only while every non-matching declaration seen so far also
 * had a non-null prefix string ({@code useDefaultNS}).
 *
 * @param uriAtom the atom id of the namespace URI to look up
 * @return a usable prefix atom for the URI, or -1 if none was found
 */
protected int getPrefixID(int uriAtom) {
    int a = -1; // fallback: first matching prefix with a non-null string
    boolean useDefaultNS = true;
    ArrayList<Integer> ubp = new ArrayList<Integer>();
    long minOrdinal = 0;
    for (int ns = getNSNodeID(tree.nodeOrdinal[node]); ns >= 0; ns = nextNSNodeID(ns, minOrdinal)) {
        int uri = tree.nsNodeUriAtom[ns];
        int prefix = tree.nsNodePrefixAtom[ns];
        if (tree.atomString(uri) == null) {
            // Declaration with a null URI string: record its prefix as unusable.
            ubp.add(prefix);
            continue;
        }
        if (uri != uriAtom) {
            // Non-matching URI: a null prefix string here disables the
            // default-namespace shortcut below.
            useDefaultNS &= (tree.atomString(prefix) != null);
            continue;
        }
        if (ubp.contains(prefix))
            continue;
        if (tree.atomString(prefix) != null) {
            if (a == -1)
                a = prefix;
            continue;
        }
        if (useDefaultNS)
            return prefix;
    }
    return a;
}
From source file:com.celements.web.utils.WebUtils.java
/** * @deprecated since 2.17.0 instead use WebUtilsService *///from w w w . jav a 2 s .com @Deprecated public List<String> getDocumentParentsList(String fullName, boolean includeDoc, XWikiContext context) { ArrayList<String> docParents = new ArrayList<String>(); try { String nextParent; if (includeDoc) { nextParent = fullName; } else { nextParent = getParentFullName(fullName, context); } while (!"".equals(nextParent) && (context.getWiki().exists(nextParent, context)) && !docParents.contains(nextParent)) { docParents.add(nextParent); nextParent = getParentFullName(nextParent, context); } } catch (XWikiException e) { LOGGER.error(e, e); } return docParents; }
From source file:com.esri.geoevent.solutions.adapter.cot.CoTAdapter.java
private String filterOutDots(String s) { String sStageOne = s.replace("h.", "").replace("t.", "").replace("r.", "").replace("q.", "").replace("o.", "");//from ww w. j a v a 2 s.c o m String[] s2 = sStageOne.trim().split(" "); ArrayList<String> l1 = new ArrayList<String>(); for (String item : s2) { l1.add(item); } ArrayList<String> l2 = new ArrayList<String>(); Iterator<String> iterator = l1.iterator(); while (iterator.hasNext()) { String o = (String) iterator.next(); if (!l2.contains(o)) l2.add(o); } StringBuffer sb = new StringBuffer(); for (String item : l2) { sb.append(item); sb.append(" "); } return sb.toString().trim().toLowerCase(); }