List of usage examples for org.w3c.dom Document getDoctype
public DocumentType getDoctype();
The Document Type Declaration (DocumentType) associated with this document; for documents without a document type declaration this returns null.
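Before the full source-file examples, a minimal standalone sketch of the typical call pattern (the file name "sample.xml" and the printed labels are illustrative assumptions, not taken from the examples below):

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.DocumentType;

public class GetDoctypeDemo {
    public static void main(String[] args) throws Exception {
        DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
        // "sample.xml" is a placeholder; point this at any XML file that declares a DOCTYPE.
        Document doc = builder.parse("sample.xml");

        DocumentType doctype = doc.getDoctype();
        if (doctype == null) {
            System.out.println("No DOCTYPE declaration present");
        } else {
            System.out.println("Name:      " + doctype.getName());
            System.out.println("Public ID: " + doctype.getPublicId());
            System.out.println("System ID: " + doctype.getSystemId());
        }
    }
}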
From source file:be.fgov.kszbcss.rhq.websphere.component.j2ee.SpecVersionMeasurementHandler.java
public final void getValue(WebSphereServer server, MeasurementReport report, MeasurementScheduleRequest request)
        throws InterruptedException, ConnectorException, ConfigQueryException {
    Document document = getDeploymentDescriptor();
    if (document != null) {
        DocumentType docType = document.getDoctype();
        String version = null;
        if (docType != null) {
            String publicId = docType.getPublicId();
            if (log.isDebugEnabled()) {
                log.debug("Public ID: " + publicId);
            }
            for (Pattern pattern : getPublicIdPatterns()) {
                Matcher matcher = pattern.matcher(publicId);
                if (matcher.matches()) {
                    version = matcher.group(1);
                    if (log.isDebugEnabled()) {
                        log.debug("Public ID matches pattern; version=" + version);
                    }
                    break;
                }
            }
            if (version == null) {
                log.warn("Unexpected public ID found in application.xml deployment descriptor: " + publicId);
            }
        }
        if (version == null) {
            version = document.getDocumentElement().getAttribute("version");
        }
        report.addData(new MeasurementDataTrait(request, version));
    }
}
From source file:Main.java
/**
 * Try to normalize a document by removing nonsignificant whitespace.
 *
 * @see "#62006"
 */
private static Document normalize(Document orig) throws IOException {
    DocumentBuilder builder = null;
    DocumentBuilderFactory factory = getFactory(false, false);
    try {
        builder = factory.newDocumentBuilder();
    } catch (ParserConfigurationException e) {
        throw new IOException("Cannot create parser satisfying configuration parameters: " + e, e); //NOI18N
    }
    DocumentType doctype = null;
    NodeList nl = orig.getChildNodes();
    for (int i = 0; i < nl.getLength(); i++) {
        if (nl.item(i) instanceof DocumentType) {
            // We cannot import DocumentType's, so we need to manually copy it.
            doctype = (DocumentType) nl.item(i);
        }
    }
    Document doc;
    if (doctype != null) {
        doc = builder.getDOMImplementation().createDocument(orig.getDocumentElement().getNamespaceURI(),
                orig.getDocumentElement().getTagName(),
                builder.getDOMImplementation().createDocumentType(orig.getDoctype().getName(),
                        orig.getDoctype().getPublicId(), orig.getDoctype().getSystemId()));
        // XXX what about entity decls inside the DOCTYPE?
        doc.removeChild(doc.getDocumentElement());
    } else {
        doc = builder.newDocument();
    }
    for (int i = 0; i < nl.getLength(); i++) {
        Node node = nl.item(i);
        if (!(node instanceof DocumentType)) {
            try {
                doc.appendChild(doc.importNode(node, true));
            } catch (DOMException x) {
                // Thrown in NB-Core-Build #2896 & 2898 inside GeneratedFilesHelper.applyBuildExtensions
                throw new IOException("Could not import or append " + node + " of " + node.getClass(), x);
            }
        }
    }
    doc.normalize();
    nl = doc.getElementsByTagName("*"); // NOI18N
    for (int i = 0; i < nl.getLength(); i++) {
        Element e = (Element) nl.item(i);
        removeXmlBase(e);
        NodeList nl2 = e.getChildNodes();
        for (int j = 0; j < nl2.getLength(); j++) {
            Node n = nl2.item(j);
            if (n instanceof Text && ((Text) n).getNodeValue().trim().length() == 0) {
                e.removeChild(n);
                j--; // since list is dynamic
            }
        }
    }
    return doc;
}
From source file:com.amalto.core.storage.hibernate.DefaultStorageClassLoader.java
protected InputStream toInputStream(Document document) throws Exception {
    StringWriter buffer = new StringWriter();
    Transformer transformer = XMLUtils.generateTransformer();
    DocumentType doctype = document.getDoctype();
    if (doctype != null) {
        if (doctype.getPublicId() != null) {
            transformer.setOutputProperty(OutputKeys.DOCTYPE_PUBLIC, doctype.getPublicId());
        }
        if (doctype.getSystemId() != null) {
            transformer.setOutputProperty(OutputKeys.DOCTYPE_SYSTEM, doctype.getSystemId());
        }
    }
    transformer.transform(new DOMSource(document), new StreamResult(buffer));
    String cnt = buffer.toString();
    return new ByteArrayInputStream(cnt.getBytes("UTF-8")); //$NON-NLS-1$
}
From source file:Main.java
/**
 * Writes a DOM document to a stream. The precise output format is not
 * guaranteed but this method will attempt to indent it sensibly.
 *
 * <p class="nonnormative"><b>Important</b>: There might be some problems
 * with <code><![CDATA[ ]]></code> sections in the DOM tree you pass
 * into this method. Specifically, some CDATA sections may not be written as
 * CDATA sections or may be merged with other CDATA sections at the same
 * level. Also, if plain text nodes are mixed with CDATA sections at the same
 * level, all text is likely to end up in one big CDATA section.
 * <br>
 * For nodes that only have one CDATA section this method should work fine.
 * </p>
 *
 * @param doc DOM document to be written
 * @param out data sink
 * @param enc XML-defined encoding name (for example, "UTF-8")
 * @throws IOException if JAXP fails or the stream cannot be written to
 */
public static void write(Document doc, OutputStream out, String enc) throws IOException {
    if (enc == null) {
        throw new NullPointerException(
                "You must set an encoding; use \"UTF-8\" unless you have a good reason not to!"); // NOI18N
    }
    Document doc2 = normalize(doc);
    ClassLoader orig = Thread.currentThread().getContextClassLoader();
    Thread.currentThread()
            .setContextClassLoader(AccessController.doPrivileged(new PrivilegedAction<ClassLoader>() { // #195921
                @Override
                public ClassLoader run() {
                    return new ClassLoader(ClassLoader.getSystemClassLoader().getParent()) {
                        @Override
                        public InputStream getResourceAsStream(String name) {
                            if (name.startsWith("META-INF/services/")) {
                                return new ByteArrayInputStream(new byte[0]); // JAXP #6723276
                            }
                            return super.getResourceAsStream(name);
                        }
                    };
                }
            }));
    try {
        TransformerFactory tf = TransformerFactory.newInstance();
        Transformer t = tf.newTransformer(new StreamSource(new StringReader(IDENTITY_XSLT_WITH_INDENT)));
        DocumentType dt = doc2.getDoctype();
        if (dt != null) {
            String pub = dt.getPublicId();
            if (pub != null) {
                t.setOutputProperty(OutputKeys.DOCTYPE_PUBLIC, pub);
            }
            String sys = dt.getSystemId();
            if (sys != null) {
                t.setOutputProperty(OutputKeys.DOCTYPE_SYSTEM, sys);
            }
        }
        t.setOutputProperty(OutputKeys.ENCODING, enc);
        try {
            t.setOutputProperty(ORACLE_IS_STANDALONE, "yes");
        } catch (IllegalArgumentException x) {
            // fine, introduced in JDK 7u4
        }
        // See #123816
        Set<String> cdataQNames = new HashSet<String>();
        collectCDATASections(doc2, cdataQNames);
        if (cdataQNames.size() > 0) {
            StringBuilder cdataSections = new StringBuilder();
            for (String s : cdataQNames) {
                cdataSections.append(s).append(' '); //NOI18N
            }
            t.setOutputProperty(OutputKeys.CDATA_SECTION_ELEMENTS, cdataSections.toString());
        }
        Source source = new DOMSource(doc2);
        Result result = new StreamResult(out);
        t.transform(source, result);
    } catch (javax.xml.transform.TransformerException | RuntimeException e) {
        // catch anything that happens
        throw new IOException(e);
    } finally {
        Thread.currentThread().setContextClassLoader(orig);
    }
}
From source file:com.autentia.mvn.plugin.changes.BugzillaChangesMojo.java
/**
 * Adapts the Bugzilla XML document for transformations.
 *
 * @param docBugzilla
 */
private void cleanBugzillaDocument(final Document docBugzilla) {
    // remove the DTD
    final Node docType = docBugzilla.getDoctype();
    docBugzilla.removeChild(docType);
    // set the title
    final Element title = docBugzilla.createElement(ELEMENT_TITLE);
    title.appendChild(docBugzilla.createTextNode(this.project.getName()));
    docBugzilla.getDocumentElement().appendChild(title);
    // set the version attributes used for sorting
    final NodeList target_milestones = docBugzilla.getElementsByTagName(ELEMENT_TARGET_MILESTONE);
    for (int i = 0; i < target_milestones.getLength(); i++) {
        final Element target_milestone = (Element) target_milestones.item(i);
        final String version = target_milestone.getTextContent();
        final String[] versions = version.split("\\.");
        // only the first two parts are taken into account
        // (e.g. for version="1.23" we get version1="1" and version2="23";
        // and for version="1.23.3" the result is the same)
        String version1 = "";
        if (versions.length > 0) {
            version1 = versions[0];
        }
        String version2 = "";
        if (versions.length >= 2) {
            version2 = versions[1];
        }
        target_milestone.setAttribute(ATTRIBUTE_VERSION1, version1);
        target_milestone.setAttribute(ATTRIBUTE_VERSION2, version2);
    }
    // if developer names need to be adjusted, process them
    if (this.fitDevelopers) {
        final NodeList assigned_tos = docBugzilla.getElementsByTagName(ELEMENT_ASSIGNED_TO);
        for (int i = 0; i < assigned_tos.getLength(); i++) {
            final Element assigned_to = (Element) assigned_tos.item(i);
            String developer = assigned_to.getTextContent();
            final int index = developer.indexOf("@");
            if (index != -1) {
                developer = developer.substring(0, index);
            }
            // remove the existing text children
            final NodeList childs = assigned_to.getChildNodes();
            for (int j = 0; j < childs.getLength(); j++) {
                final Node child = childs.item(j);
                assigned_to.removeChild(child);
                // decrement j because the child is also removed from the NodeList
                j--;
            }
            assigned_to.appendChild(docBugzilla.createTextNode(developer));
        }
    }
    // remove the nodes that are not needed
    final String[] nodes2Clean = { "creation_ts", "reporter_accessible", "cclist_accessible",
            "classification_id", "classification", "product", "component", "version", "rep_platform",
            "op_sys", "bug_status", "resolution", "priority", "everconfirmed", "estimated_time",
            "remaining_time", "actual_time", "who", "thetext" };
    for (final String node2clean : nodes2Clean) {
        this.removeNodes(docBugzilla.getElementsByTagName(node2clean));
    }
}
From source file:DOMWriter.java
/** Writes the specified node, recursively. */
public void write(Node node) {
    // is there anything to do?
    if (node == null) {
        return;
    }

    short type = node.getNodeType();
    switch (type) {
    case Node.DOCUMENT_NODE: {
        Document document = (Document) node;
        fXML11 = "1.1".equals(getVersion(document));
        if (!fCanonical) {
            if (fXML11) {
                fOut.println("<?xml version=\"1.1\" encoding=\"UTF-8\"?>");
            } else {
                fOut.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
            }
            fOut.flush();
            write(document.getDoctype());
        }
        write(document.getDocumentElement());
        break;
    }
    case Node.DOCUMENT_TYPE_NODE: {
        DocumentType doctype = (DocumentType) node;
        fOut.print("<!DOCTYPE ");
        fOut.print(doctype.getName());
        String publicId = doctype.getPublicId();
        String systemId = doctype.getSystemId();
        if (publicId != null) {
            fOut.print(" PUBLIC '");
            fOut.print(publicId);
            fOut.print("' '");
            fOut.print(systemId);
            fOut.print('\'');
        } else if (systemId != null) {
            fOut.print(" SYSTEM '");
            fOut.print(systemId);
            fOut.print('\'');
        }
        String internalSubset = doctype.getInternalSubset();
        if (internalSubset != null) {
            fOut.println(" [");
            fOut.print(internalSubset);
            fOut.print(']');
        }
        fOut.println('>');
        break;
    }
    case Node.ELEMENT_NODE: {
        fOut.print('<');
        fOut.print(node.getNodeName());
        Attr attrs[] = sortAttributes(node.getAttributes());
        for (int i = 0; i < attrs.length; i++) {
            Attr attr = attrs[i];
            fOut.print(' ');
            fOut.print(attr.getNodeName());
            fOut.print("=\"");
            normalizeAndPrint(attr.getNodeValue(), true);
            fOut.print('"');
        }
        fOut.print('>');
        fOut.flush();
        Node child = node.getFirstChild();
        while (child != null) {
            write(child);
            child = child.getNextSibling();
        }
        break;
    }
    case Node.ENTITY_REFERENCE_NODE: {
        if (fCanonical) {
            Node child = node.getFirstChild();
            while (child != null) {
                write(child);
                child = child.getNextSibling();
            }
        } else {
            fOut.print('&');
            fOut.print(node.getNodeName());
            fOut.print(';');
            fOut.flush();
        }
        break;
    }
    case Node.CDATA_SECTION_NODE: {
        if (fCanonical) {
            normalizeAndPrint(node.getNodeValue(), false);
        } else {
            fOut.print("<![CDATA[");
            fOut.print(node.getNodeValue());
            fOut.print("]]>");
        }
        fOut.flush();
        break;
    }
    case Node.TEXT_NODE: {
        normalizeAndPrint(node.getNodeValue(), false);
        fOut.flush();
        break;
    }
    case Node.PROCESSING_INSTRUCTION_NODE: {
        fOut.print("<?");
        fOut.print(node.getNodeName());
        String data = node.getNodeValue();
        if (data != null && data.length() > 0) {
            fOut.print(' ');
            fOut.print(data);
        }
        fOut.print("?>");
        fOut.flush();
        break;
    }
    case Node.COMMENT_NODE: {
        if (!fCanonical) {
            fOut.print("<!--");
            String comment = node.getNodeValue();
            if (comment != null && comment.length() > 0) {
                fOut.print(comment);
            }
            fOut.print("-->");
            fOut.flush();
        }
    }
    }

    if (type == Node.ELEMENT_NODE) {
        fOut.print("</");
        fOut.print(node.getNodeName());
        fOut.print('>');
        fOut.flush();
    }
}
From source file:com.idiominc.ws.opentopic.fo.i18n.PreprocessorTask.java
@Override
public void execute() throws BuildException {
    checkParameters();
    log("Processing " + input + " to " + output, Project.MSG_INFO);
    OutputStream out = null;
    try {
        final DocumentBuilder documentBuilder = XMLUtils.getDocumentBuilder();
        documentBuilder.setEntityResolver(xmlcatalog);
        final Document doc = documentBuilder.parse(input);
        final Document conf = documentBuilder.parse(config);
        final MultilanguagePreprocessor preprocessor = new MultilanguagePreprocessor(new Configuration(conf));
        final Document document = preprocessor.process(doc);

        final TransformerFactory transformerFactory = TransformerFactory.newInstance();
        transformerFactory.setURIResolver(xmlcatalog);
        final Transformer transformer;
        if (style != null) {
            log("Loading stylesheet " + style, Project.MSG_INFO);
            transformer = transformerFactory.newTransformer(new StreamSource(style));
        } else {
            transformer = transformerFactory.newTransformer();
        }
        transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "no");
        transformer.setOutputProperty(OutputKeys.INDENT, "no");
        transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
        if (doc.getDoctype() != null) {
            transformer.setOutputProperty(OutputKeys.DOCTYPE_PUBLIC, doc.getDoctype().getPublicId());
            transformer.setOutputProperty(OutputKeys.DOCTYPE_SYSTEM, doc.getDoctype().getSystemId());
        }
        out = new FileOutputStream(output);
        final StreamResult streamResult = new StreamResult(out);
        transformer.transform(new DOMSource(document), streamResult);
    } catch (final RuntimeException e) {
        throw e;
    } catch (final Exception e) {
        throw new BuildException(e);
    } finally {
        IOUtils.closeQuietly(out);
    }
}
From source file:org.alloy.metal.xml.merge.MergeContext.java
/**
 * Merge 2 xml document streams together into a final resulting stream. During
 * the merge, various merge business rules are followed based on configuration
 * defined for various merge points.
 *
 * @param stream1
 * @param stream2
 * @return the stream representing the merged document
 * @throws org.broadleafcommerce.common.extensibility.context.merge.exceptions.MergeException
 */
public ResourceInputStream merge(ResourceInputStream stream1, ResourceInputStream stream2) throws MergeException {
    try {
        Document doc1 = builder.parse(stream1);
        Document doc2 = builder.parse(stream2);

        List<Node> exhaustedNodes = new ArrayList<Node>();

        // process any defined handlers
        for (MergeHandler handler : this.handlers) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Processing handler: " + handler.getXPath());
            }
            MergePoint point = new MergePoint(handler, doc1, doc2);
            List<Node> list = point.merge(exhaustedNodes);
            exhaustedNodes.addAll(list);
        }

        TransformerFactory tFactory = TransformerFactory.newInstance();
        Transformer xmlTransformer = tFactory.newTransformer();
        xmlTransformer.setOutputProperty(OutputKeys.VERSION, "1.0");
        xmlTransformer.setOutputProperty(OutputKeys.ENCODING, _String.CHARACTER_ENCODING.toString());
        xmlTransformer.setOutputProperty(OutputKeys.METHOD, "xml");
        xmlTransformer.setOutputProperty(OutputKeys.INDENT, "yes");
        if (doc1.getDoctype() != null && doc1.getDoctype().getSystemId() != null) {
            xmlTransformer.setOutputProperty(OutputKeys.DOCTYPE_SYSTEM, doc1.getDoctype().getSystemId());
        }

        DOMSource source = new DOMSource(doc1);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(baos));
        StreamResult result = new StreamResult(writer);
        xmlTransformer.transform(source, result);

        byte[] itemArray = baos.toByteArray();

        return new ResourceInputStream(new ByteArrayInputStream(itemArray), stream2.getName(), stream1.getNames());
    } catch (Exception e) {
        throw new MergeException(e);
    }
}
From source file:jef.tools.XMLUtils.java
private static void output(Node node, StreamResult sr, String encoding, int indent, Boolean XmlDeclarion)
        throws IOException {
    if (node.getNodeType() == Node.ATTRIBUTE_NODE) {
        sr.getWriter().write(node.getNodeValue());
        sr.getWriter().flush();
        return;
    }
    TransformerFactory tf = TransformerFactory.newInstance();
    Transformer t = null;
    try {
        if (indent > 0) {
            try {
                tf.setAttribute("indent-number", indent);
                t = tf.newTransformer();
                t.setOutputProperty(OutputKeys.INDENT, "yes");
            } catch (Exception e) {
            }
        } else {
            t = tf.newTransformer();
        }
        t.setOutputProperty(OutputKeys.METHOD, "xml");
        if (encoding != null) {
            t.setOutputProperty(OutputKeys.ENCODING, encoding);
        }
        if (XmlDeclarion == null) {
            XmlDeclarion = (node instanceof Document);
        }
        if (node instanceof Document) {
            Document doc = (Document) node;
            if (doc.getDoctype() != null) {
                t.setOutputProperty(javax.xml.transform.OutputKeys.DOCTYPE_PUBLIC, doc.getDoctype().getPublicId());
                t.setOutputProperty(javax.xml.transform.OutputKeys.DOCTYPE_SYSTEM, doc.getDoctype().getSystemId());
            }
        }
        if (XmlDeclarion) {
            t.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "no");
        } else {
            t.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
        }
    } catch (Exception tce) {
        throw new IOException(tce);
    }
    DOMSource doms = new DOMSource(node);
    try {
        t.transform(doms, sr);
    } catch (TransformerException te) {
        IOException ioe = new IOException();
        ioe.initCause(te);
        throw ioe;
    }
}
From source file:org.apache.geode.management.internal.configuration.utils.XmlUtils.java
/**
 * Upgrade the schema of a given Config XML <code>document</code> to the given
 * <code>namespace</code>, <code>schemaLocation</code> and <code>version</code>.
 *
 * @param document Config XML {@link Document} to upgrade.
 * @param namespaceUri Namespace URI to upgrade to.
 * @param schemaLocation Schema location to upgrade to.
 * @param schemaVersion Schema version to upgrade to.
 * @throws XPathExpressionException
 * @throws ParserConfigurationException
 * @since GemFire 8.1
 */
public static Document upgradeSchema(Document document, final String namespaceUri,
        final String schemaLocation, String schemaVersion)
        throws XPathExpressionException, ParserConfigurationException {
    if (StringUtils.isBlank(namespaceUri)) {
        throw new IllegalArgumentException("namespaceUri");
    }
    if (StringUtils.isBlank(schemaLocation)) {
        throw new IllegalArgumentException("schemaLocation");
    }
    if (StringUtils.isBlank(schemaVersion)) {
        throw new IllegalArgumentException("schemaVersion");
    }

    if (null != document.getDoctype()) {
        Node root = document.getDocumentElement();

        Document copiedDocument = getDocumentBuilder().newDocument();
        Node copiedRoot = copiedDocument.importNode(root, true);
        copiedDocument.appendChild(copiedRoot);

        document = copiedDocument;
    }

    final Element root = document.getDocumentElement();

    // since root is the cache element, then this oldNamespace will be the cache's namespaceURI
    String oldNamespaceUri = root.getNamespaceURI();

    // update the namespace
    if (!namespaceUri.equals(oldNamespaceUri)) {
        changeNamespace(root, oldNamespaceUri, namespaceUri);
    }

    // update the version
    root.setAttribute("version", schemaVersion);

    // update the schemaLocation attribute
    Node schemaLocationAttr = root.getAttributeNodeNS(W3C_XML_SCHEMA_INSTANCE_NS_URI,
            W3C_XML_SCHEMA_INSTANCE_ATTRIBUTE_SCHEMA_LOCATION);
    String xsiPrefix = findPrefix(root, W3C_XML_SCHEMA_INSTANCE_NS_URI);
    Map<String, String> uriToLocation = new HashMap<>();
    if (schemaLocationAttr != null) {
        uriToLocation = buildSchemaLocationMap(schemaLocationAttr.getNodeValue());
    } else if (xsiPrefix == null) {
        // this namespace is not defined yet, define it
        xsiPrefix = W3C_XML_SCHEMA_INSTANCE_PREFIX;
        root.setAttribute("xmlns:" + xsiPrefix, W3C_XML_SCHEMA_INSTANCE_NS_URI);
    }
    uriToLocation.remove(oldNamespaceUri);
    uriToLocation.put(namespaceUri, schemaLocation);

    root.setAttributeNS(W3C_XML_SCHEMA_INSTANCE_NS_URI,
            xsiPrefix + ":" + W3C_XML_SCHEMA_INSTANCE_ATTRIBUTE_SCHEMA_LOCATION,
            getSchemaLocationValue(uriToLocation));

    return document;
}