Example usage for org.dom4j Node getText

List of usage examples for org.dom4j Node getText

Introduction

On this page you can find example usage for org.dom4j Node getText.

Prototype

String getText();

Document

Returns the text of this node.
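
For quick reference, here is a minimal, self-contained sketch of the call; the class name and the sample XML are illustrative only and are not taken from the source files listed below.

import org.dom4j.Document;
import org.dom4j.DocumentHelper;
import org.dom4j.Node;

public class NodeGetTextExample {

    public static void main(String[] args) throws Exception {
        // Parse a small in-memory document (sample XML for illustration only).
        Document document = DocumentHelper.parseText("<root><retcode>0</retcode></root>");

        // Select a single node by XPath and read its text content.
        Node node = document.selectSingleNode("/root/retcode");
        System.out.println(node.getText()); // prints "0"
    }
}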

Usage

From source file:com.dp2345.plugin.tenpayBank.TenpayBankPlugin.java

License:Open Source License

@SuppressWarnings("unchecked")
@Override
public boolean verifyNotify(String sn, NotifyMethod notifyMethod, HttpServletRequest request) {
    PluginConfig pluginConfig = getPluginConfig();
    Payment payment = getPayment(sn);
    if (generateSign(request.getParameterMap()).equals(request.getParameter("sign"))
            && pluginConfig.getAttribute("partner").equals(request.getParameter("partner"))
            && sn.equals(request.getParameter("out_trade_no"))
            && "0".equals(request.getParameter("trade_state"))
            && payment.getAmount().multiply(new BigDecimal(100))
                    .compareTo(new BigDecimal(request.getParameter("total_fee"))) == 0) {
        try {
            Map<String, Object> parameterMap = new HashMap<String, Object>();
            parameterMap.put("input_charset", "utf-8");
            parameterMap.put("sign_type", "MD5");
            parameterMap.put("partner", pluginConfig.getAttribute("partner"));
            parameterMap.put("notify_id", request.getParameter("notify_id"));
            String verifyUrl = "https://gw.tenpay.com/gateway/simpleverifynotifyid.xml?input_charset=utf-8&sign_type=MD5&partner="
                    + pluginConfig.getAttribute("partner") + "&notify_id=" + request.getParameter("notify_id")
                    + "&sign=" + generateSign(parameterMap);
            Document document = new SAXReader().read(new URL(verifyUrl));
            Node node = document.selectSingleNode("/root/retcode");
            if ("0".equals(node.getText().trim())) {
                return true;
            }
        } catch (DocumentException e) {
            e.printStackTrace();
        } catch (MalformedURLException e) {
            e.printStackTrace();
        }
    }
    return false;
}

From source file:com.dp2345.plugin.tenpayPartner.TenpayPartnerPlugin.java

License:Open Source License

@SuppressWarnings("unchecked")
@Override
public boolean verifyNotify(String sn, NotifyMethod notifyMethod, HttpServletRequest request) {
    PluginConfig pluginConfig = getPluginConfig();
    if (generateSign(request.getParameterMap()).equals(request.getParameter("sign"))
            && pluginConfig.getAttribute("partner").equals(request.getParameter("partner"))
            && sn.equals(request.getParameter("out_trade_no"))
            && "0".equals(request.getParameter("trade_state"))) {
        try {
            Map<String, Object> parameterMap = new HashMap<String, Object>();
            parameterMap.put("input_charset", "utf-8");
            parameterMap.put("sign_type", "MD5");
            parameterMap.put("partner", pluginConfig.getAttribute("partner"));
            parameterMap.put("notify_id", request.getParameter("notify_id"));
            String verifyUrl = "https://gw.tenpay.com/gateway/simpleverifynotifyid.xml?input_charset=utf-8&sign_type=MD5&partner="
                    + pluginConfig.getAttribute("partner") + "&notify_id=" + request.getParameter("notify_id")
                    + "&sign=" + generateSign(parameterMap);
            Document document = new SAXReader().read(new URL(verifyUrl));
            Node node = document.selectSingleNode("/root/retcode");
            if ("0".equals(node.getText().trim())) {
                return true;
            }
        } catch (DocumentException e) {
            e.printStackTrace();
        } catch (MalformedURLException e) {
            e.printStackTrace();
        }
    }
    return false;
}

From source file:com.feilong.framework.netpay.payment.adaptor.alipay.pconline.AlipayOnlineAdaptor.java

License:Apache License

/**
 * Calls the query_timestamp service and parses the returned XML to obtain the anti-phishing key. <br>
 * Note: failures while parsing the remote XML may be related to whether the server supports SSL.
 * 
 * @return the anti-phishing key, or an empty string if it could not be obtained
 */
private final String getAnti_phishing_key() {

    // build the request URL for the query_timestamp service
    StringBuilder sb = new StringBuilder();
    sb.append(gateway);
    sb.append("?");
    sb.append("service=" + service_query_timestamp);
    sb.append("&");
    sb.append("partner=" + partner);

    InputStream inputStream = null;
    try {
        URL url = new URL(sb.toString());
        inputStream = url.openStream();

        SAXReader saxReader = new SAXReader();
        Document document = saxReader.read(inputStream);

        if (log.isDebugEnabled()) {
            log.debug("document:{}", document.toString());

            // <alipay>
            // <is_success>T</is_success>
            // <request>
            // <param name="service">query_timestamp</param>
            // <param name="partner">2088201564862550</param>
            // </request>
            // <response>
            // <timestamp>
            // <encrypt_key>KPr8DuZp5xc031OVxw==</encrypt_key>
            // </timestamp>
            // </response>
            // <sign>1fc434a9045f5681736cd47ee2faa41a</sign>
            // <sign_type>MD5</sign_type>
            // </alipay>
        }

        StringBuilder result = new StringBuilder();
        @SuppressWarnings("unchecked")
        List<Node> nodeList = document.selectNodes("//alipay/*");
        for (Node node : nodeList) {
            // read the name and text of each child node of <alipay>
            String name = node.getName();
            String text = node.getText();
            if (name.equals("is_success") && text.equals("T")) {
                // the query succeeded: collect the timestamp (encrypt_key) value
                @SuppressWarnings("unchecked")
                List<Node> nodeList1 = document.selectNodes("//response/timestamp/*");
                for (Node node1 : nodeList1) {
                    result.append(node1.getText());
                }
            }
        }

        String anti_phishing_key = result.toString();

        if (log.isDebugEnabled()) {
            log.debug("anti_phishing_key value:[{}]", anti_phishing_key);
        }

        return anti_phishing_key;
    } catch (MalformedURLException e) {
        log.error(e.getClass().getName(), e);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    } catch (DocumentException e) {
        log.error(e.getClass().getName(), e);
    } finally {
        try {
            if (null != inputStream) {
                inputStream.close();
            }
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
    // the key could not be obtained, return an empty string
    return "";
}

From source file:com.finderbots.miner2.tomatoes.MineRTCriticsPreferences.java

License:Apache License

private String mineItemName(Document doc) {
    List<Node> titleNodes = getNodes(doc,
            "//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"movie_title\", \" \" ))]//span");
    Node titleNode = titleNodes.get(0);
    if (titleNode == null) {
        LOGGER.warn("Can't locate media title in page: " + _result.getUrl());
        throw new IllegalStateException();
    }
    String title = titleNode.getText();
    if (title == null || title.isEmpty()) {
        LOGGER.warn("Can't locate media title in page: " + _result.getUrl());
        throw new IllegalStateException();
    }
    return title;
}

From source file:com.flaptor.hounder.indexer.DocumentConverter.java

License:Apache License

/**
 * @todo refactor this method, it is too long
 */
private org.apache.lucene.document.Document processAdd(final Element e) throws IllegalDocumentException {
    // TODO: This method is too long, refactor.
    logger.debug("Processing Add");

    float documentBoost;
    Node node = e.selectSingleNode("boost");
    if (null == node) {
        documentBoost = 1.0F;
    } else {
        documentBoost = Float.parseFloat(node.getText());
        if (logger.isEnabledFor(Level.DEBUG)) {
            logger.debug("Using non-default document boost of " + documentBoost);
        }
    }
    if (Float.isNaN(documentBoost) || Float.isInfinite(documentBoost) || documentBoost <= 0) {
        throw new IllegalDocumentException("Document with invalid boost (" + documentBoost + ") received.");
    }

    org.apache.lucene.document.Document ldoc = new org.apache.lucene.document.Document();
    ldoc.setBoost(documentBoost);

    // For comparison with the required fields we keep track of the added
    // fields.
    HashSet<String> providedFields = new HashSet<String>();

    //First, we add the documentId as a field under the name provided in the configuration (docIdName)
    node = e.selectSingleNode("documentId");
    if (null == node) {
        throw new IllegalDocumentException("Add document missing documentId.");
    }
    String docIdText = node.getText();
    //now we add the documentId as another field, using the name provided in the configuration (docIdName)
    Field lfield = new Field(docIdName, docIdText, Field.Store.YES, Field.Index.NOT_ANALYZED);
    ldoc.add(lfield);
    providedFields.add(docIdName);
    if (logger.isEnabledFor(Level.DEBUG)) {
        logger.debug("Writer - adding documentId field:" + docIdName
                + ", index: true, store: true, token: false, text: " + docIdText);
    }
    // Now we add the regular fields
    for (Iterator iter = e.elementIterator("field"); iter.hasNext();) {
        Element field = (Element) iter.next();
        String fieldName, storedS, indexedS, tokenizedS, boostS, fieldText;
        boolean stored, tokenized, indexed;
        float boost = 1;

        fieldName = field.valueOf("@name");
        if (fieldName.equals("")) {
            throw new IllegalDocumentException("Field without name.");
        }

        //There cannot be a field with the name used to store the documentId (docIdName)
        //as it would collide with the documentId per se when saved to the lucene index.
        fieldText = field.getText();
        if (fieldName.equals(docIdName)) {
            throw new IllegalDocumentException(
                    "This document contains a field with the same name as the configured name "
                            + "to save the documentId( " + docIdName + ").");
        }

        storedS = field.valueOf("@stored");
        if (storedS.equals("")) {
            throw new IllegalDocumentException("Field without stored attribute.");
        }
        stored = Boolean.valueOf(storedS);

        indexedS = field.valueOf("@indexed");
        if (indexedS.equals("")) {
            throw new IllegalDocumentException("Field without indexed attribute.");
        }
        indexed = Boolean.valueOf(indexedS);
        //Lucene complains of an unindexed unstored field with a runtime exception
        //and it makes no sense anyway
        if (!(indexed || stored)) {
            throw new IllegalDocumentException("processAdd: unindexed unstored field \"" + fieldName + "\".");
        }

        tokenizedS = field.valueOf("@tokenized");
        if (tokenizedS.equals("")) {
            throw new IllegalDocumentException("Field without tokenized attribute.");
        }
        tokenized = Boolean.valueOf(tokenizedS);

        boostS = field.valueOf("@boost");
        if (!boostS.equals("")) {
            try {
                boost = new Float(boostS).floatValue();
            } catch (NumberFormatException exception) {
                throw new IllegalDocumentException(
                        "Unparsable boost value (" + boostS + ") for field  \"" + fieldName + "\".");
            }
        }

        // Now we add the fields. Depending on the parameter stored, indexed
        // and tokenized we call a different field constructor.
        lfield = null;
        Field.Index indexType = (indexed ? (tokenized ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED)
                : Field.Index.NO);
        Field.Store storeType;
        if (!stored) {
            storeType = Field.Store.NO;
        } else {
            if (compressedFields.contains(fieldName)) {
                storeType = Field.Store.COMPRESS;
            } else {
                storeType = Field.Store.YES;
            }
        }
        lfield = new Field(fieldName, fieldText, storeType, indexType);

        lfield.setBoost(boost);
        providedFields.add(fieldName); // for later comparison with the required fields

        ldoc.add(lfield);
        if (logger.isEnabledFor(Level.DEBUG)) {
            logger.debug("Writer - adding field:" + fieldName + ", index:" + indexed + ", store:" + stored
                    + ", token:" + tokenized + " ,boost: " + boost + ", text: " + fieldText);
        }
    } // for  (field iterator)

    HashSet<String> providedPayloads = new HashSet<String>();
    // Now we add the payloads
    for (Iterator iter = e.elementIterator("payload"); iter.hasNext();) {
        Element payload = (Element) iter.next();

        String payloadName = payload.valueOf("@name");
        if (payloadName.equals("")) {
            throw new IllegalDocumentException("Payload without name.");
        }
        providedPayloads.add(payloadName);
        try {
            Long payloadValue = Long.parseLong(payload.getText());
            ldoc.add(new Field(payloadName, new FixedValueTokenStream(payloadName, payloadValue)));
            logger.debug("Adding payload \"" + payloadName + "\" to document \"" + docIdText + "\" with value "
                    + payloadValue);
        } catch (NumberFormatException nfe) {
            throw new IllegalDocumentException(
                    "Writer - while parsing Long payload \"" + payloadName + "\": " + nfe.getMessage());
        }
    }

    // now we test for the presence of the required fields
    if (!providedFields.containsAll(requiredFields) || !providedPayloads.containsAll(requiredPayloads)) {
        StringBuffer sb = new StringBuffer();
        sb.append("Document with missing required fields or payloads. Ignoring addition.\n");
        sb.append("Provided fields are: \n");
        for (String field : providedFields) {
            sb.append(field + "\n");
        }
        sb.append("The fields required are: \n");
        for (String field : requiredFields) {
            sb.append(field + "\n");
        }

        sb.append("Provided payloads are: \n");
        for (String payload : providedPayloads) {
            sb.append(payload + "\n");
        }
        sb.append("Required payloads are: \n");
        for (String payload : requiredPayloads) {
            sb.append(payload + "\n");
        }
        throw new IllegalDocumentException(sb.toString());
    }
    return ldoc;
}

From source file:com.flaptor.hounder.indexer.FieldFormatCheckerModule.java

License:Apache License

/**
 * Processes the document. Checks that every configured long field is present and
 * parseable as a Long; returns the same document, or no document if a check fails.
 */
protected Document[] internalProcess(final Document doc) {

    // check that this is a documentAdd
    // otherwise, skip.
    Node root = doc.getRootElement();
    if (!root.getName().equals("documentAdd"))
        return new Document[] { doc };

    for (String longField : longFields) {
        Node node = doc.selectSingleNode("//field[@name='" + longField + "']");
        if (null == node) {
            logger.error("Document lacks field " + longField + ". Dropping document. ");
            if (logger.isDebugEnabled()) {
                logger.debug(DomUtil.domToString(doc) + " lacks field " + longField);
            }
            return new Document[0];
        }

        String text = node.getText();
        try {
            Long.parseLong(text);
        } catch (NumberFormatException e) {
            logger.error(
                    "Document has field " + longField + ", but it is not parseable as Long. Dropping document");
            if (logger.isDebugEnabled()) {
                logger.debug(DomUtil.domToString(doc) + " contains field " + longField
                        + " but it is not parseable as Long. Node:" + node.toString() + " - text: " + text);
            }
            return new Document[0];
        }
    }

    // TODO insert more field type checks here
    Document[] docs = { doc };
    return docs;
}

From source file:com.flaptor.hounder.indexer.HtmlParser.java

License:Apache License

/**
 * Parses a tag to produce a field.
 * @param doc the doc to modify
 * @throws Exception on error, signaling the main method to return no document.
 */
private void processTag(Document doc, final String tagName, final String fieldName) throws Exception {
    Node bodyElement = doc.selectSingleNode("/*/" + tagName);
    if (null == bodyElement) {
        logger.warn("Content element missing from document. I was expecting a '" + tagName + "'. Will not add '"
                + fieldName + "' field.");
        return;
    }

    Node destElement = doc.selectSingleNode("//field[@name='" + fieldName + "']");
    if (null != destElement) {
        logger.warn("Parsed element '" + fieldName + "' already present in document. Will not overwrite.");
        return;
    }

    ParseOutput out = parser.parse("", bodyElement.getText().getBytes("UTF-8"), "UTF-8");

    for (String field : extraFields) {
        String content = out.getField(field);
        if (null == content) {
            logger.debug("had document without " + field + " field. Continuing with other fields.");
            continue;
        }
        Element docField = DocumentHelper.createElement("field");
        docField.addText(content);
        docField.addAttribute("name", field);
        docField.addAttribute("indexed", Boolean.toString(INDEXED));
        docField.addAttribute("stored", Boolean.toString(STORED));
        docField.addAttribute("tokenized", "true");
        bodyElement.getParent().add(docField);
    }

    String text = out.getText();
    Element field = DocumentHelper.createElement("field");
    field.addText(text);
    field.addAttribute("name", fieldName);
    field.addAttribute("indexed", Boolean.toString(INDEXED));
    field.addAttribute("stored", Boolean.toString(STORED));
    field.addAttribute("tokenized", "true");
    bodyElement.getParent().add(field);
}

From source file:com.flaptor.hounder.indexer.MultiIndexer.java

License:Apache License

/**
 * Adds a document to the indexing queue.
 * @param doc the request Document
 * @return SUCCESS, RETRY_QUEUE_FULL.
 * @throws IllegalStateException if the state of the indexer is not running.
 * @see com.flaptor.util.remote.XmlrpcServer
 */
public IndexerReturnCode index(Document doc) {
    if (state != RunningState.RUNNING) {
        throw new IllegalStateException(
                "index: Trying to index a document but the MultiIndexer is no longer running.");
    }

    if (null == doc) {
        throw new IllegalArgumentException("Got null Document.");
    }

    if (useXslt) {
        Document[] docs = xsltModule.process(doc);
        if (null == docs || docs.length != 1) {
            String error = "XsltModule did not return 1 document. It returned "
                    + ((null == docs) ? "null" : String.valueOf(docs.length))
                    + ". This is wrong and we will not continue";
            logger.fatal(error);
            System.exit(-1);
            //this is just to make the damn compiler happy
            return IndexerReturnCode.FAILURE;
        }
        // else
        doc = docs[0];
    }

    if (null == doc) {
        logger.fatal(
                "got null document after transformation. this should not happen. I can't tell which is the offending document.");
        System.exit(1);
    }

    // check commands
    Node command = doc.selectSingleNode("/command");
    if (null != command) {
        return processCommand(doc);
    }

    // else, it may be a document to index / delete
    // get the documentId.

    Node node = doc.selectSingleNode("//documentId");
    if (null == node) {
        logger.error("Document missing documentId. Will not index it.");
        logger.error("Document was : " + DomUtil.domToString(doc));
        return IndexerReturnCode.FAILURE;
    }

    // get the target indexer.
    // Make sure that urls begin with http://
    String url = node.getText();
    int target = hashFunction.hash(url);

    // send the document to the target indexer.
    try {
        IRemoteIndexer indexer = indexers.get(target);
        logger.debug("Sending " + node.getText() + " to indexer " + target);
        IndexerReturnCode res = indexer.index(doc);
        if (res != IndexerReturnCode.SUCCESS) {
            logger.debug("Problem (" + res + ") indexing " + node.getText() + " to indexer " + target);
        }
        return res;
    } catch (com.flaptor.util.remote.RpcException e) {
        logger.error("Connection failed: ", e);
        return IndexerReturnCode.FAILURE;
    }
}

From source file:com.flaptor.hounder.indexer.MultiIndexer.java

License:Apache License

protected IndexerReturnCode processCommand(final Document doc) {
    Node commandNode = doc.selectSingleNode("/command");
    Node attNode = commandNode.selectSingleNode("@node");

    if (null == attNode) { // so it is for all nodes

        for (IRemoteIndexer indexer : indexers) {
            boolean accepted = false;
            int retries = 0;

            // try until accepted, or too much retries.
            while (!accepted && retries < RETRY_LIMIT) {
                try {
                    IndexerReturnCode retValue = indexer.index(doc);
                    if (IndexerReturnCode.SUCCESS == retValue) {
                        accepted = true;
                    } else {
                        retries++;
                        Execute.sleep(10 * 1000); // wait 10 seconds before retry
                    }
                } catch (com.flaptor.util.remote.RpcException e) {
                    logger.error("processCommand: Connection failed: " + e.getMessage(), e);
                }
            }
            // if could not send to indexer
            if (!accepted) {
                logger.error(
                        "processCommand: tried " + RETRY_LIMIT + " times to index command, but failed on node "
                                + indexer.toString() + ". Will not continue with other indexers.");
                return IndexerReturnCode.FAILURE;

            }
        }
        // in case no one returned Indexer.FAILURE
        return IndexerReturnCode.SUCCESS;
    } else { // specific node

        try {
            int indexerNumber = Integer.parseInt(attNode.getText());
            if (indexerNumber < indexers.size() && indexerNumber >= 0) {
                IRemoteIndexer indexer = indexers.get(indexerNumber);
                try {
                    return indexer.index(doc);
                } catch (com.flaptor.util.remote.RpcException e) {
                    logger.error("processCommand: while sending command to single node " + indexer.toString()
                            + " " + e.getMessage(), e);
                    return IndexerReturnCode.FAILURE;
                }
            } else {
                logger.error("processCommand: received command for node number out of indexers range. Received "
                        + indexerNumber + " and have " + indexers.size() + " indexers. Ignoring command.");
                return IndexerReturnCode.FAILURE;
            }
        } catch (NumberFormatException e) {
            logger.error("processCommand: can not parse node number: " + e, e);
            return IndexerReturnCode.PARSE_ERROR;
        }
    }
}

From source file:com.flaptor.hounder.indexer.Writer.java

License:Apache License

private void processDelete(final Element e) {
    if (logger.isEnabledFor(Level.DEBUG)) {
        logger.debug("Processing delete");
    }
    Node node = e.selectSingleNode("documentId");
    if (null != node) {
        iwp.deleteDocument(node.getText());
    } else {
        logger.error("documentId node not found. Ignoring deletion.");
    }
}