Example usage for java.net URL getQuery

List of usage examples for java.net URL getQuery

Introduction

On this page you can find example usages of java.net.URL.getQuery().

Prototype

public String getQuery() 

Document

Gets the query part of this URL.
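
A minimal, self-contained sketch of the call (the URL literals are illustrative):

import java.net.URL;

public class GetQueryDemo {
    public static void main(String[] args) throws Exception {
        URL url = new URL("https://example.com/search?q=java&lang=en");
        System.out.println(url.getQuery()); // prints "q=java&lang=en"

        // getQuery() returns null when the URL has no query part
        System.out.println(new URL("https://example.com/index.html").getQuery()); // prints "null"
    }
}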

Usage

From source file:org.wso2.identity.integration.test.auth.RiskBasedLoginTestCase.java

@Test(groups = "wso2.is", description = "Check conditional authentication flow.")
public void testAuthenticationForRisk() throws Exception {

    userRiskScores.put(userInfo.getUserName(), 1);
    cookieStore.clear();

    response = loginWithOIDC(PRIMARY_IS_APPLICATION_NAME, consumerKey, client);

    EntityUtils.consume(response.getEntity());

    Header locationHeader = response.getFirstHeader(OAuth2Constant.HTTP_RESPONSE_HEADER_LOCATION);

    String callbackUrl = DataExtractUtil.getParamFromURIString(locationHeader.getValue(), "callbackUrl");
    String[] urlParts = locationHeader.getValue().split("\\?");

    List<NameValuePair> urlParameters = new ArrayList<>();
    urlParameters.add(new BasicNameValuePair("fingerprint", "fingerprint"));
    urlParameters.add(new BasicNameValuePair("callbackUrl", callbackUrl));

    response = sendPostRequestWithParameters(client, urlParameters, urlParts[0]);
    locationHeader = response.getFirstHeader(OAuth2Constant.HTTP_RESPONSE_HEADER_LOCATION);
    EntityUtils.consume(response.getEntity());

    response = sendGetRequest(client, locationHeader.getValue());
    locationHeader = response.getFirstHeader(OAuth2Constant.HTTP_RESPONSE_HEADER_LOCATION);
    EntityUtils.consume(response.getEntity());

    Assert.assertNotNull(locationHeader, "Login response header is null");
    locationHeader = handleConsent(locationHeader);

    URL clientUrl = new URL(locationHeader.getValue());
    Assert.assertTrue(clientUrl.getQuery().contains("code="),
            "Authentication flow was unsuccessful with risk-based login");
}
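
If a test needed the authorization code itself rather than just asserting its presence, a small helper along these lines could pull it out of the query string (a hypothetical addition, not part of the test above):

private static String extractAuthCode(URL redirectUrl) {
    for (String pair : redirectUrl.getQuery().split("&")) {
        String[] kv = pair.split("=", 2);
        if ("code".equals(kv[0]) && kv.length == 2) {
            return kv[1]; // still URL-encoded; decode before further use
        }
    }
    return null;
}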

From source file:cn.openwatch.internal.http.loopj.AsyncHttpClient.java

/**
 * Encodes the url, unless disabled, and appends params to the end of it
 *
 * @param url             String with URL, should be valid URL without params
 * @param params          RequestParams to be appended on the end of URL
 * @param shouldEncodeUrl whether url should be encoded (replaces spaces with %20)
 * @return encoded url if requested with params appended if any available
 */
public static String getUrlWithQueryString(boolean shouldEncodeUrl, String url, RequestParams params) {
    if (url == null)
        return null;

    if (shouldEncodeUrl) {
        try {
            String decodedURL = URLDecoder.decode(url, "UTF-8");
            URL _url = new URL(decodedURL);
            URI _uri = new URI(_url.getProtocol(), _url.getUserInfo(), _url.getHost(), _url.getPort(),
                    _url.getPath(), _url.getQuery(), _url.getRef());
            url = _uri.toASCIIString();
        } catch (Exception ex) {
            // Should not really happen, added just for sake of validity
        }
    }

    if (params != null) {
        // Construct the query string and trim it, in case it
        // includes any excessive white spaces.
        String paramString = params.getParamString().trim();

        // Only add the query string if it isn't empty and it
        // isn't equal to '?'.
        if (!paramString.equals("") && !paramString.equals("?")) {
            url += url.contains("?") ? "&" : "?";
            url += paramString;
        }
    }

    return url;
}
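
The encoding step above is a standard JDK idiom: decompose the URL with java.net.URL, then re-assemble it with the multi-argument java.net.URI constructor, which percent-encodes each component. In isolation (the URL literal is illustrative):

URL raw = new URL("http://example.com/a path/?q=a b");
URI encoded = new URI(raw.getProtocol(), raw.getUserInfo(), raw.getHost(),
        raw.getPort(), raw.getPath(), raw.getQuery(), raw.getRef());
System.out.println(encoded.toASCIIString());
// prints "http://example.com/a%20path/?q=a%20b"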

From source file:edu.si.services.sidora.rest.batch.beans.BatchRequestControllerBean.java

/**
 * Check Resource MimeType using Apache Tika
 * @param exchange
 * @throws URISyntaxException
 * @throws MalformedURLException
 */
public void getMIMEType(Exchange exchange) throws URISyntaxException, MalformedURLException {

    /**
     * TODO:
     *
     * Need to make sure that mimetypes are consistent with what's used in workbench.
     * See link for workbench mimetype list
     *
     * https://github.com/Smithsonian/sidora-workbench/blob/master/workbench/includes/utils.inc#L1119
     *
     */

    out = exchange.getIn();

    URL url = new URL(out.getHeader("resourceFile", String.class));

    URI uri = new URI(url.getProtocol(), url.getUserInfo(), url.getHost(), url.getPort(), url.getPath(),
            url.getQuery(), url.getRef());

    String resourceFile = uri.toASCIIString();
    String resourceFileExt = FilenameUtils.getExtension(resourceFile);
    String mimeType = null;

    if (resourceFileExt.equalsIgnoreCase("nef")) {
        mimeType = "image/x-nikon-nef";
    } else if (resourceFileExt.equalsIgnoreCase("dng")) {
        mimeType = "image/x-adobe-dng";
    } else {
        LOG.debug("Checking {} for MIME Type", resourceFile);

        mimeType = new Tika().detect(resourceFile);
    }

    LOG.debug("Batch Process " + resourceFile + " || MIME=" + mimeType);

    out.setHeader("dsMIME", mimeType);
}
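
Tika's detect(String) can work from the name alone, which is what the fallback branch above relies on; a quick sketch (assumes tika-core on the classpath):

Tika tika = new Tika();
System.out.println(tika.detect("photo.jpg")); // prints "image/jpeg"
System.out.println(tika.detect("paper.pdf")); // prints "application/pdf"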

From source file:com.digitalpebble.stormcrawler.filtering.basic.BasicURLNormalizer.java

/**
 * Basic filter that removes query parameters which do not change the
 * content of the page. An example is a Google Analytics parameter such as
 * "utm_campaign", which may take several different values for URLs that
 * point to the same content.
 */
private String filterQueryElements(String urlToFilter) {
    try {
        // Handle illegal characters by making a url first
        // this will clean illegal characters like |
        URL url = new URL(urlToFilter);

        if (StringUtils.isEmpty(url.getQuery())) {
            return urlToFilter;
        }

        List<NameValuePair> pairs = new ArrayList<>();
        URLEncodedUtils.parse(pairs, new Scanner(url.getQuery()), "UTF-8");
        Iterator<NameValuePair> pairsIterator = pairs.iterator();
        while (pairsIterator.hasNext()) {
            NameValuePair param = pairsIterator.next();
            if (queryElementsToRemove.contains(param.getName())) {
                pairsIterator.remove();
            }
        }

        StringBuilder newFile = new StringBuilder();
        if (url.getPath() != null) {
            newFile.append(url.getPath());
        }
        if (!pairs.isEmpty()) {
            Collections.sort(pairs, comp);
            String newQueryString = URLEncodedUtils.format(pairs, StandardCharsets.UTF_8);
            newFile.append('?').append(newQueryString);
        }
        if (url.getRef() != null) {
            newFile.append('#').append(url.getRef());
        }

        return new URL(url.getProtocol(), url.getHost(), url.getPort(), newFile.toString()).toString();
    } catch (MalformedURLException e) {
        LOG.warn("Invalid urlToFilter {}. {}", urlToFilter, e);
        return null;
    }
}
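
The same idea without the httpclient helpers, as a minimal JDK-only sketch (uses java.util.Set and java.util.StringJoiner; assumes single-valued, already-encoded parameters and ignores the ref):

private static String dropParams(String urlToFilter, Set<String> toRemove) throws MalformedURLException {
    URL url = new URL(urlToFilter);
    if (url.getQuery() == null) {
        return urlToFilter;
    }
    StringJoiner kept = new StringJoiner("&");
    for (String pair : url.getQuery().split("&")) {
        if (!toRemove.contains(pair.split("=", 2)[0])) {
            kept.add(pair);
        }
    }
    String file = url.getPath() + (kept.length() > 0 ? "?" + kept : "");
    return new URL(url.getProtocol(), url.getHost(), url.getPort(), file).toString();
}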

From source file:saintybalboa.nutch.net.AdvancedURLNormalizer.java

public String normalize(String urlString, String scope) throws MalformedURLException {
    if ("".equals(urlString)) // permit empty
        return urlString;

    urlString = urlString.trim(); // remove extra spaces
    String ourlString = urlString;
    URL url = new URL(urlString);

    String protocol = url.getProtocol();
    String host = url.getHost();
    int port = url.getPort();
    String path = url.getPath().toLowerCase();
    String queryStr = url.getQuery();

    boolean changed = false;

    if (!urlString.startsWith(protocol)) // protocol was lowercased
        changed = true;

    if ("http".equals(protocol) || "https".equals(protocol) || "ftp".equals(protocol)) {

        if (host != null) {
            String newHost = host.toLowerCase(); // lowercase host
            if (!host.equals(newHost)) {
                host = newHost;
                changed = true;
            }
        }

        if (port == url.getDefaultPort()) { // uses default port
            port = -1; // so don't specify it
            changed = true;
        }

        if (url.getRef() != null) { // remove the ref
            changed = true;
        }

        if (queryStr != null) {
            if (!queryStr.isEmpty() && !"?".equals(queryStr)) {

                changed = true;

                // convert query parameter names and values to lowercase, depending on the configured arguments
                queryStr = formatQueryString(queryStr);
            }
        }

    }

    urlString = (queryStr != null && !queryStr.isEmpty())
            ? new URL(protocol, host, port, path).toString() + "?" + queryStr
            : new URL(protocol, host, port, path).toString();

    // re-append any trailing characters (such as the ref) that rebuilding dropped
    if (ourlString.length() > urlString.length())
        urlString = urlString + ourlString.substring(urlString.length(), ourlString.length());

    return urlString;
}
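
The default-port rule above in isolation (pure JDK; the URL literal is illustrative):

URL u = new URL("http://example.com:80/index.html");
System.out.println(u.getPort());        // prints "80"
System.out.println(u.getDefaultPort()); // prints "80"
// the two match, so the normalizer rebuilds with port -1 and drops ":80"
System.out.println(new URL("http", "example.com", -1, "/index.html"));
// prints "http://example.com/index.html"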

From source file:net.sf.jabref.external.DownloadExternalFile.java

/**
 * Look for the last '.' in the link, and return the following characters.
 * This gives the extension for most reasonably named links.
 *
 * @param link The link
 * @return The suffix, excluding the dot (e.g. "pdf")
 */
private String getSuffix(final String link) {
    String strippedLink = link;
    try {
        // Try to strip the query string, if any, to get the correct suffix:
        URL url = new URL(link);
        if ((url.getQuery() != null) && (url.getQuery().length() < (link.length() - 1))) {
            strippedLink = link.substring(0, link.length() - url.getQuery().length() - 1);
        }
    } catch (MalformedURLException e) {
        // Don't report this error, since getting the suffix is a non-critical
        // operation, and the error will be triggered and reported elsewhere.
    }
    // First see if the stripped link gives a reasonable suffix:
    String suffix;
    int strippedLinkIndex = strippedLink.lastIndexOf('.');
    if ((strippedLinkIndex <= 0) || (strippedLinkIndex == (strippedLink.length() - 1))) {
        suffix = null;
    } else {
        suffix = strippedLink.substring(strippedLinkIndex + 1);
    }
    if (!ExternalFileTypes.getInstance().isExternalFileTypeByExt(suffix)) {
        // If the suffix doesn't seem to give any reasonable file type, try
        // with the non-stripped link:
        int index = link.lastIndexOf('.');
        if ((index <= 0) || (index == (link.length() - 1))) {
            // No occurrence, or at the end
            // Check if there are path separators in the suffix - if so, it is definitely
            // not a proper suffix, so we should give up:
            if (strippedLink.substring(strippedLinkIndex + 1).indexOf('/') >= 1) {
                return "";
            } else {
                return suffix; // return the first one we found, anyway.
            }
        } else {
            // Check if there are path separators in the suffix - if so, it is definitely
            // not a proper suffix, so we should give up:
            if (link.substring(index + 1).indexOf('/') >= 1) {
                return "";
            } else {
                return link.substring(index + 1);
            }
        }
    } else {
        return suffix;
    }

}
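
An illustrative trace of the query-stripping step (hypothetical input):

// getSuffix("http://example.com/paper.pdf?download=true")
//   url.getQuery() -> "download=true"
//   strippedLink   -> "http://example.com/paper.pdf"
//   returns "pdf" rather than a suffix polluted by the query string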

From source file:jp.igapyon.selecrawler.SeleCrawlerWebContentGetter.java

public File getFileHtml(final String deviceName, final String urlLookup) throws IOException {
    final URL url = new URL(urlLookup);
    final String serverhostname = url.getHost();
    String path = url.getPath();
    if (path.length() == 0 || path.equals("/") || path.endsWith("/")) {
        path = path + "/index.html";
    }

    if (url.getQuery() != null) {
        try {
            path += new URLCodec().encode("?" + url.getQuery());
        } catch (EncoderException e) {
            e.printStackTrace();
        }

    }

    return new File(settings.getPathTargetDir() + deviceName + "/" + serverhostname + path);
}

From source file:de.bayern.gdi.processor.AtomDownloadJob.java

@Override
protected void download() throws JobExecutionException {
    Document ds = getDocument(figureoutDatasource());
    HashMap<String, String> vars = new HashMap<>();
    vars.put("VARIATION", this.variation);
    NodeList nl = (NodeList) XML.xpath(ds, XPATH_LINKS, XPathConstants.NODESET, NAMESPACE_CONTEXT, vars);

    ArrayList<DLFile> files = new ArrayList<>(nl.getLength());

    String format = "%0" + places(nl.getLength()) + "d.%s";
    for (int i = 0, j = 0, n = nl.getLength(); i < n; i++) {
        Element link = (Element) nl.item(i);
        String href = link.getAttribute("href");
        if (href.isEmpty()) {
            continue;
        }
        URL dataURL = toURL(href);
        String fileName;
        // Service call?
        if (dataURL.getQuery() != null) {
            String type = link.getAttribute("type");
            String ext = mimetypeToExt(type);
            fileName = String.format(format, j, ext);
            j++;
        } else { // Direct download.
            // XXX: Do more to prevent directory traversals?
            fileName = new File(dataURL.getPath()).getName().replaceAll("\\.+", ".");

            if (fileName.isEmpty()) {
                String type = link.getAttribute("type");
                String ext = mimetypeToExt(type);
                fileName = String.format(format, j, ext);
                j++;
            }
        }
        File file = new File(this.workingDir, fileName);
        files.add(new DLFile(file, dataURL));
    }

    int failed = 0;
    int numFiles = files.size();

    for (;;) {
        for (int i = 0; i < files.size();) {
            DLFile file = files.get(i);
            if (downloadFile(file)) {
                files.remove(i);
            } else {
                if (++file.tries < MAX_TRIES) {
                    i++;
                } else {
                    failed++;
                    files.remove(i);
                }
            }
            broadcastMessage(
                    I18n.format("atom.downloaded.files", numFiles - failed - files.size(), files.size()));
        }
        if (files.isEmpty()) {
            break;
        }
        try {
            Thread.sleep(FAIL_SLEEP);
        } catch (InterruptedException ie) {
            break;
        }
    }

    log.log(Level.INFO, "Bytes downloaded: " + this.totalCount);

    if (failed > 0) {
        throw new JobExecutionException(I18n.format("atom.downloaded.failed", numFiles - failed, failed));
    }

    broadcastMessage(I18n.format("atom.downloaded.success", numFiles));
}
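
The getQuery() check is the service-call heuristic: a link with a query string is assumed to be a service request (so the file gets a counter-based name plus a MIME-derived extension), while a plain path is a direct download whose last segment can be reused. In isolation (URL literals are illustrative):

URL service = new URL("http://example.com/wfs?service=WFS&request=GetFeature");
System.out.println(service.getQuery() != null); // prints "true" -> counter-based name

URL direct = new URL("http://example.com/data/file.gml");
System.out.println(new File(direct.getPath()).getName()); // prints "file.gml"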

From source file:opendap.threddsHandler.ThreddsCatalogUtil.java

public static String getUrlInfo(URL url) throws InterruptedException {
    String info = "URL:\n";

    info += "    getHost():         " + url.getHost() + "\n";
    info += "    getAuthority():    " + url.getAuthority() + "\n";
    info += "    getFile():         " + url.getFile() + "\n";
    info += "    getSystemPath():         " + url.getPath() + "\n";
    info += "    getDefaultPort():  " + url.getDefaultPort() + "\n";
    info += "    getPort():         " + url.getPort() + "\n";
    info += "    getProtocol():     " + url.getProtocol() + "\n";
    info += "    getQuery():        " + url.getQuery() + "\n";
    info += "    getRef():          " + url.getRef() + "\n";
    info += "    getUserInfo():     " + url.getUserInfo() + "\n";

    return info;
}
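
For a concrete URL the report would read as follows (the URL is illustrative; the values follow directly from the JDK's URL accessors):

getUrlInfo(new URL("http://user@example.com:8080/docs/index.html?lang=en#top")) returns:

URL:
    getHost():         example.com
    getAuthority():    user@example.com:8080
    getFile():         /docs/index.html?lang=en
    getPath():         /docs/index.html
    getDefaultPort():  80
    getPort():         8080
    getProtocol():     http
    getQuery():        lang=en
    getRef():          top
    getUserInfo():     user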

From source file:org.bibsonomy.scraper.url.kde.isi.IsiScraper.java

@Override
protected boolean scrapeInternal(ScrapingContext sc) throws ScrapingException {
    sc.setScraper(this);

    try {

        final URL pageUrl = sc.getUrl();
        // get cookie
        final String cookie = WebUtils.getCookies(new URL("http://isiknowledge.com/?DestApp=UA"));

        // get sid from url
        final Matcher sidMatcher = sidPattern.matcher(pageUrl.getQuery());
        final String sid;
        if (sidMatcher.find())
            sid = sidMatcher.group(1);
        else
            throw new ScrapingFailureException("article ID not found in URL");

        // get selectedIds from given page 
        final Matcher selectedIdsMatcher = selectedIdsPattern.matcher(
                WebUtils.getContentAsString(pageUrl, cookie + ";SID=" + sid + ";CUSTOMER=FAK Consortium"));
        final String selectedIds;
        if (selectedIdsMatcher.find())
            selectedIds = selectedIdsMatcher.group(1);
        else
            throw new ScrapingFailureException("selected publications not found (selectedIds is missing)");

        // build post request for getting bibtex download page

        // post content
        final String postData = "action=go&" + "mode=quickOutput&" + "product=UA&" + "SID=" + sid + "&"
                + "format=save&" + "fields=FullNoCitRef&" + "mark_id=WOS&" + "count_new_items_marked=0&"
                + "selectedIds=" + selectedIds + "&" + "qo_fields=fullrecord&" + "save_options=bibtex&"
                + "save.x=27&" + "save.y=12&" + "next_mode=&" + "redirect_url= ";

        // call post request
        final String content = WebUtils.getPostContentAsString(cookie, new URL(BASE_URL_1), postData);

        // extract direct bibtex download link from post result
        final Matcher downloadLinkMatcher = downloadLinkPattern.matcher(content);
        final URL downloadUrl;
        if (downloadLinkMatcher.find())
            downloadUrl = new URL(BASE_URL_2 + downloadLinkMatcher.group(1));
        else
            throw new ScrapingFailureException("cannot find BibTeX download link");

        // get bibtex
        final String bibtex = WebUtils.getContentAsString(downloadUrl, cookie);

        if (bibtex != null) {
            sc.setBibtexResult(bibtex);
            return true;
        }
    } catch (IOException ex) {
        throw new InternalFailureException(ex);
    }

    return false;
}
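
sidPattern and selectedIdsPattern are fields defined elsewhere in the scraper; the SID extraction against getQuery() works roughly like this (the pattern shown is an assumption, not the scraper's actual one):

Pattern sidPattern = Pattern.compile("SID=([^&]+)");
Matcher m = sidPattern.matcher(new URL("http://isiknowledge.com/?DestApp=UA&SID=abc123").getQuery());
if (m.find()) {
    System.out.println(m.group(1)); // prints "abc123"
}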