Example usage for java.net URLConnection connect

List of usage examples for java.net URLConnection connect

Introduction

On this page you can find usage examples for java.net URLConnection connect.

Prototype

public abstract void connect() throws IOException;

Document

Opens a communications link to the resource referenced by this URL, if such a connection has not already been established.
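
Before the project examples below, here is a minimal sketch of the typical call sequence: configure the connection, call connect(), then read from it. The URL and timeout value are illustrative assumptions, not taken from any example on this page.

import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.net.URLConnection;

public class ConnectSketch {
    public static void main(String[] args) throws IOException {
        // Hypothetical URL, used only for illustration.
        URLConnection conn = new URL("https://example.com/").openConnection();
        conn.setConnectTimeout(5000); // must be set before connect() to have any effect
        conn.connect(); // opens the communications link; a no-op if already connected
        try (InputStream in = conn.getInputStream()) {
            System.out.println("Content type: " + conn.getContentType());
        }
    }
}

Note that configuration methods such as setUseCaches and setRequestProperty throw IllegalStateException once the connection is established, which is why every example below configures the URLConnection before calling connect().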

Usage

From source file:org.apache.ojb.broker.metadata.RepositoryPersistor.java

/**
 *
 * TODO: We should re-design the configuration file reading
 */
private Object buildRepository(String repositoryFileName, Class targetRepository)
        throws MalformedURLException, ParserConfigurationException, SAXException, IOException {
    URL url = buildURL(repositoryFileName);
    /*
    arminw:
    strange, when using 'url.openStream()' argument repository
    could not be parsed
    ipriha:
    parser needs a base url to find referenced entities.
    */
    // InputSource source = new InputSource(url.openStream());

    String pathName = url.toExternalForm();

    log.info("Building repository from :" + pathName);
    InputSource source = new InputSource(pathName);
    URLConnection conn = url.openConnection();
    conn.setUseCaches(false);
    conn.connect();
    InputStream in = conn.getInputStream();
    source.setByteStream(in);
    try {
        return readMetadataFromXML(source, targetRepository);
    } finally {
        try {
            in.close();
        } catch (IOException x) {
            log.warn("unable to close repository input stream [" + x.getMessage() + "]", x);
        }
    }
}

From source file:org.ambraproject.service.search.SolrHttpServiceImpl.java

/**
 * @inheritDoc
 */
public Document makeSolrRequestForRss(String queryString) throws SolrException {

    if (solrUrl == null || solrUrl.isEmpty()) {
        setSolrUrl(config.getString(URL_CONFIG_PARAM));
    }

    queryString = "?" + queryString;

    URL url;
    String urlString = solrUrl + queryString;
    log.debug("Making Solr http request to " + urlString);
    try {
        url = new URL(urlString);
    } catch (MalformedURLException e) {
        throw new SolrException("Bad Solr Url: " + urlString, e);
    }

    InputStream urlStream = null;
    Document doc = null;
    try {
        URLConnection connection = url.openConnection();
        connection.setConnectTimeout(CONNECTION_TIMEOUT);
        connection.connect();
        urlStream = connection.getInputStream();
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        factory.setNamespaceAware(true);
        DocumentBuilder builder = factory.newDocumentBuilder();
        doc = builder.parse(urlStream);
    } catch (IOException e) {
        throw new SolrException("Error connecting to the Solr server at " + solrUrl, e);
    } catch (ParserConfigurationException e) {
        throw new SolrException("Error configuring parser xml parser for solr response", e);
    } catch (SAXException e) {
        throw new SolrException("Solr Returned bad XML for url: " + urlString, e);
    } finally {
        //Close the input stream
        if (urlStream != null) {
            try {
                urlStream.close();
            } catch (IOException e) {
                log.error("Error closing url stream to Solr", e);
            }
        }
    }

    return doc;
}

From source file:ubic.basecode.ontology.OntologyLoader.java

/**
 * Load an ontology into memory. Use this type of model when fast access is critical and memory is available.
 * If loading from the URL fails, an attempt is made to load from the disk cache under cacheName.
 *
 * @param  url       the URL to load the ontology from
 * @param  spec      e.g. OWL_MEM_TRANS_INF
 * @param  cacheName unique name of this ontology, will be used to load from disk in case of failed url connection
 * @return the loaded ontology model
 */
public static OntModel loadMemoryModel(String url, OntModelSpec spec, String cacheName) {
    StopWatch timer = new StopWatch();
    timer.start();
    OntModel model = getMemoryModel(url, spec);

    URLConnection urlc = null;
    int tries = 0;
    while (tries < MAX_CONNECTION_TRIES) {
        try {
            urlc = new URL(url).openConnection();
            // help ensure mis-configured web servers aren't causing trouble.
            urlc.setRequestProperty("Accept", "application/rdf+xml");

            try {
                HttpURLConnection c = (HttpURLConnection) urlc;
                c.setInstanceFollowRedirects(true);
            } catch (ClassCastException e) {
                // not via http, using a FileURLConnection.
            }

            if (tries > 0) {
                log.info("Retrying connecting to " + url + " [" + tries + "/" + MAX_CONNECTION_TRIES
                        + " of max tries");
            } else {
                log.info("Connecting to " + url);
            }

            urlc.connect(); // Will error here on bad URL

            if (urlc instanceof HttpURLConnection) {
                String newUrl = urlc.getHeaderField("Location");

                if (StringUtils.isNotBlank(newUrl)) {
                    log.info("Redirect to " + newUrl);
                    urlc = new URL(newUrl).openConnection();
                    // help ensure mis-configured web servers aren't causing trouble.
                    urlc.setRequestProperty("Accept", "application/rdf+xml");
                    urlc.connect();
                }
            }

            break;
        } catch (IOException e) {
            // try to recover.
            log.error(e + " retrying?");
            tries++;
        }
    }

    if (urlc != null) {
        try (InputStream in = urlc.getInputStream()) {
            Reader reader;
            if (cacheName != null) {
                // write tmp to disk
                File tempFile = getTmpDiskCachePath(cacheName);
                if (tempFile == null) {
                    reader = new InputStreamReader(in);
                } else {
                    tempFile.getParentFile().mkdirs();
                    Files.copy(in, tempFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
                    reader = new FileReader(tempFile);
                }

            } else {
                // Skip the cache
                reader = new InputStreamReader(in);
            }

            assert reader != null;
            try (BufferedReader buf = new BufferedReader(reader)) {
                model.read(buf, url);
            }

            log.info("Load model: " + timer.getTime() + "ms");
        } catch (IOException e) {
            log.error(e.getMessage(), e);
        }
    }

    if (cacheName != null) {

        File f = getDiskCachePath(cacheName);
        File tempFile = getTmpDiskCachePath(cacheName);
        File oldFile = getOldDiskCachePath(cacheName);

        if (model.isEmpty()) {
            // Attempt to load from disk cache

            if (f == null) {
                throw new RuntimeException(
                        "Ontology cache directory required to load from disk: ontology.cache.dir");
            }

            if (f.exists() && !f.isDirectory()) {
                try (BufferedReader buf = new BufferedReader(new FileReader(f))) {
                    model.read(buf, url);
                    // We successfully loaded the cached ontology. Copy the loaded ontology to oldFile
                    // so that we don't recreate indices during initialization based on a false change in
                    // the ontology.
                    Files.copy(f.toPath(), oldFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
                    log.info("Load model from disk: " + timer.getTime() + "ms");
                } catch (IOException e) {
                    log.error(e.getMessage(), e);
                    throw new RuntimeException(
                            "Ontology failed load from URL (" + url + ") and disk cache: " + cacheName);
                }
            } else {
                throw new RuntimeException("Ontology failed load from URL (" + url
                        + ") and disk cache does not exist: " + cacheName);
            }

        } else {
            // Model was successfully loaded into memory from URL with given cacheName
            // Save cache to disk (rename temp file)
            log.info("Caching ontology to disk: " + cacheName);
            if (f != null) {
                try {
                    // Need to compare previous to current so instead of overwriting we'll move the old file
                    f.createNewFile();
                    Files.move(f.toPath(), oldFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
                    Files.move(tempFile.toPath(), f.toPath(), StandardCopyOption.REPLACE_EXISTING);
                } catch (IOException e) {
                    log.error(e.getMessage(), e);
                }
            } else {
                log.warn("Ontology cache directory required to save to disk: ontology.cache.dir");
            }
        }

    }

    assert !model.isEmpty();

    return model;
}

From source file:org.codice.ddf.admin.sources.utils.RequestUtils.java

/**
 * Attempts to open a connection to a URL.
 *
 * <p>Possible Error Codes to be returned - {@link
 * org.codice.ddf.admin.common.report.message.DefaultMessages#CANNOT_CONNECT}
 *
 * @param urlField {@link UrlField} containing the URL to connect to
 * @return a {@link Report} containing no messages on success, or containing {@link
 *     org.codice.ddf.admin.api.report.ErrorMessage}s on failure.
 */
public Report<Void> endpointIsReachable(UrlField urlField) {
    URLConnection urlConnection = null;
    try {
        urlConnection = new URL(urlField.getValue()).openConnection();
        urlConnection.setConnectTimeout(CLIENT_TIMEOUT_MILLIS);
        urlConnection.connect();
        LOGGER.debug("Successfully reached {}.", urlField);
    } catch (IOException e) {
        LOGGER.debug("Failed to reach {}, returning an error.", urlField, e);
        return Reports.from(cannotConnectError(urlField.getPath()));
    } finally {
        try {
            if (urlConnection != null) {
                urlConnection.getInputStream().close();
            }
        } catch (IOException e) {
            LOGGER.debug("Error closing connection stream.");
        }
    }
    return Reports.emptyReport();
}

From source file:org.ambraproject.service.search.SolrHttpServiceImpl.java

/**
 * @inheritDoc
 */
@Override
public Document makeSolrRequest(Map<String, String> params) throws SolrException {
    if (solrUrl == null || solrUrl.isEmpty()) {
        setSolrUrl(config.getString(URL_CONFIG_PARAM));
    }

    //make sure the return type is xml
    if (!params.keySet().contains(RETURN_TYPE_PARAM) || !params.get(RETURN_TYPE_PARAM).equals(XML)) {
        params.put(RETURN_TYPE_PARAM, XML);
    }
    //make sure that we include a 'q' parameter
    if (!params.keySet().contains(Q_PARAM)) {
        params.put(Q_PARAM, NO_FILTER);
    }

    String queryString = "?";
    for (String param : params.keySet()) {
        String value = params.get(param);
        if (queryString.length() > 1) {
            queryString += "&";
        }
        queryString += (cleanInput(param) + "=" + cleanInput(value));
    }

    URL url;
    String urlString = solrUrl + queryString;
    log.debug("Making Solr http request to " + urlString);
    try {
        url = new URL(urlString);
    } catch (MalformedURLException e) {
        throw new SolrException("Bad Solr Url: " + urlString, e);
    }

    InputStream urlStream = null;
    Document doc = null;
    try {
        URLConnection connection = url.openConnection();
        connection.setConnectTimeout(CONNECTION_TIMEOUT);
        connection.connect();
        urlStream = connection.getInputStream();
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        factory.setNamespaceAware(true);
        DocumentBuilder builder = factory.newDocumentBuilder();
        doc = builder.parse(urlStream);
    } catch (IOException e) {
        throw new SolrException("Error connecting to the Solr server at " + solrUrl, e);
    } catch (ParserConfigurationException e) {
        throw new SolrException("Error configuring parser xml parser for solr response", e);
    } catch (SAXException e) {
        throw new SolrException("Solr Returned bad XML for url: " + urlString, e);
    } finally {
        //Close the input stream
        if (urlStream != null) {
            try {
                urlStream.close();
            } catch (IOException e) {
                log.error("Error closing url stream to Solr", e);
            }
        }
    }

    return doc;
}

From source file:org.shredzone.commons.gravatar.impl.GravatarServiceImpl.java

/**
 * Fetches a Gravatar icon from the server and stores it in the given {@link File}.
 *
 * @param url
 *            Gravatar URL to fetch
 * @param file
 *            {@link File} to store the icon to
 */
private void fetchGravatar(URL url, File file) throws IOException {
    limitUpstreamRequests();

    URLConnection conn = url.openConnection();
    conn.setConnectTimeout(TIMEOUT);
    conn.setReadTimeout(TIMEOUT);

    if (file.exists()) {
        conn.setIfModifiedSince(file.lastModified());
    }

    conn.connect();

    long lastModified = conn.getLastModified();
    if (lastModified > 0L && lastModified <= file.lastModified()) {
        // Cache file exists and is unchanged
        if (log.isDebugEnabled()) {
            log.debug("Cached Gravatar is still good: {}", url);
        }

        file.setLastModified(System.currentTimeMillis()); // touch
        return;
    }

    try (InputStream in = conn.getInputStream(); OutputStream out = new FileOutputStream(file)) {
        byte[] buffer = new byte[8192];
        int total = 0;
        int len;

        while ((len = in.read(buffer)) >= 0) {
            out.write(buffer, 0, len);
            total += len;
            if (total > MAX_GRAVATAR_SIZE) {
                log.warn("Gravatar exceeded maximum size: {}", url);
                break;
            }
        }

        out.flush();

        if (log.isDebugEnabled()) {
            log.debug("Downloaded Gravatar: {}", url);
        }
    }
}

From source file:com.wallabystreet.kinjo.common.transport.protocol.jxta.JXTAWebServiceHandler.java

public void invoke(MessageContext messageContext) throws AxisFault {

    // create and open a connection to the endpoint peer
    String target = messageContext.getStrProp(MessageContext.TRANS_URL);
    URL u = null;
    try {
        u = new URL(target);
    } catch (MalformedURLException e) {
        ; // do nothing here
    }

    URLConnection c = null;
    try {
        c = u.openConnection();
    } catch (IOException e) {
        log.error("could not create connection", e);
        throw AxisFault.makeFault(e);
    }

    JxtaBiDiPipe pipe = null;
    try {
        c.connect();
        pipe = ((JXTAURLConnection) c).getConnectionPipe();
    } catch (IOException e) {
        log.error("could not connect", e);
        throw AxisFault.makeFault(e);
    }

    // extract the SOAP request from the message context
    net.jxta.endpoint.Message transportMessage = new net.jxta.endpoint.Message();

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try {
        messageContext.getRequestMessage().writeTo(baos);
    } catch (Exception e) {
        log.error("could not serialize message", e);
        throw AxisFault.makeFault(e);
    }

    // wrap the SOAP request into a JXTA transport message
    ByteArrayMessageElement bame = new ByteArrayMessageElement("message", null, baos.toByteArray(), null);
    transportMessage.addMessageElement(bame);

    try {
        // send the message over the connection pipe
        pipe.sendMessage(transportMessage);
    } catch (IOException e) {
        log.error("could not send message", e);
        throw AxisFault.makeFault(e);
    }

    // reset the transport message and wait for the SOAP response
    transportMessage = null;
    try {
        /*
         * XXX: At the moment, we wait forever (until the result message
         * arrives, that is) here. This seems to be a bad idea, since the
         * response message might take ages to arrive. Therefore, a timeout
         * should be introduced here.
         */
        transportMessage = pipe.getMessage(0);
    } catch (InterruptedException e) {
        log.error("interrupt occured during getMessage()", e);
        throw AxisFault.makeFault(e);
    }

    // unwrap the SOAP response
    ByteArrayMessageElement msgString = (ByteArrayMessageElement) transportMessage.getMessageElement("message");
    org.apache.axis.Message responseMessage = new org.apache.axis.Message(msgString.toString());

    // store the SOAP response in the message context
    messageContext.setResponseMessage(responseMessage);
}

From source file:ubic.gemma.loader.expression.geo.service.GeoBrowser.java

/**
 * Performs an E-utilities query of the GEO database with the given search terms.
 * Returns at most pageSize records (if found) starting at record #start.
 *
 * @param searchTerms terms to query GEO with
 * @param start       index of the first record to retrieve
 * @param pageSize    maximum number of records to retrieve
 * @return list of GeoRecords
 * @throws IOException
 * @throws RuntimeException
 */
public List<GeoRecord> getGeoRecordsBySearchTerm(String searchTerms, int start, int pageSize)
        throws IOException, RuntimeException {

    List<GeoRecord> records = new ArrayList<GeoRecord>();

    try {
        URL searchUrl = new URL(
                ESEARCH + searchTerms + "&retstart=" + start + "&retmax=" + pageSize + "&usehistory=y");

        URLConnection conn = searchUrl.openConnection();
        conn.connect();
        InputStream is = conn.getInputStream();

        docFactory.setIgnoringComments(true);
        docFactory.setValidating(false);

        DocumentBuilder builder = docFactory.newDocumentBuilder();
        Document searchDocument = builder.parse(is);

        NodeList countNode = searchDocument.getElementsByTagName("Count");
        Node countEl = countNode.item(0);

        int count = 0;
        try {
            count = Integer.parseInt(XMLUtils.getTextValue((Element) countEl));
        } catch (NumberFormatException e) {
            throw new IOException("Could not parse count from: " + searchUrl);
        }

        if (count == 0)
            throw new IOException("Got no records from: " + searchUrl);

        NodeList qnode = searchDocument.getElementsByTagName("QueryKey");

        Element queryIdEl = (Element) qnode.item(0);

        NodeList cknode = searchDocument.getElementsByTagName("WebEnv");
        Element cookieEl = (Element) cknode.item(0);

        String queryId = XMLUtils.getTextValue(queryIdEl);
        String cookie = XMLUtils.getTextValue(cookieEl);

        URL fetchUrl = new URL(EFETCH + "&mode=mode.text" + "&query_key=" + queryId + "&retstart=" + start
                + "&retmax=" + pageSize + "&WebEnv=" + cookie);

        conn = fetchUrl.openConnection();
        conn.connect();
        is = conn.getInputStream();

        Document summaryDocument = builder.parse(is);

        XPathFactory xFactory = XPathFactory.newInstance();
        XPath xpath = xFactory.newXPath();

        // Get relevant data from the XML file
        xaccession = xpath.compile("//DocSum/Item[@Name='GSE']");
        xtitle = xpath.compile("//DocSum/Item[@Name='title']");
        xnumSamples = xpath.compile("//DocSum/Item[@Name='n_samples']");
        xreleaseDate = xpath.compile("//DocSum/Item[@Name='PDAT']");
        xorganisms = xpath.compile("//DocSum/Item[@Name='taxon']");

        Object accessions = xaccession.evaluate(summaryDocument, XPathConstants.NODESET);
        NodeList accNodes = (NodeList) accessions;

        Object titles = xtitle.evaluate(summaryDocument, XPathConstants.NODESET);
        NodeList titleNodes = (NodeList) titles;

        Object samples = xnumSamples.evaluate(summaryDocument, XPathConstants.NODESET);
        NodeList sampleNodes = (NodeList) samples;

        Object dates = xreleaseDate.evaluate(summaryDocument, XPathConstants.NODESET);
        NodeList dateNodes = (NodeList) dates;

        Object organisms = xorganisms.evaluate(summaryDocument, XPathConstants.NODESET);
        NodeList orgnNodes = (NodeList) organisms;

        // Create GeoRecords using information parsed from XML file 
        for (int i = 0; i < accNodes.getLength(); i++) {

            GeoRecord record = new GeoRecord();

            record.setGeoAccession("GSE" + accNodes.item(i).getTextContent());

            record.setTitle(titleNodes.item(i).getTextContent());

            record.setNumSamples(Integer.parseInt(sampleNodes.item(i).getTextContent()));

            Date date = DateUtil.convertStringToDate("yyyy/MM/dd", dateNodes.item(i).getTextContent());
            record.setReleaseDate(date);

            record.setOrganisms(getTaxonCollection(orgnNodes.item(i).getTextContent()));

            records.add(record);
        }

    } catch (MalformedURLException e) {
        throw new RuntimeException(e);
    } catch (ParserConfigurationException e) {
        throw new RuntimeException(e);
    } catch (SAXException e) {
        throw new RuntimeException(e);
    } catch (XPathException e) {
        throw new RuntimeException(e);
    } catch (ParseException e) {
        throw new RuntimeException(e);
    }

    if (records.isEmpty()) {
        log.warn("No records obtained");
    }

    return records;

}

From source file:ResourceServlet.java

public void doGet(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {

    //get web.xml for display by a servlet
    String file = "/WEB-INF/web.xml";

    URL url = null;
    URLConnection urlConn = null;
    PrintWriter out = null;
    BufferedInputStream buf = null;
    try {
        out = response.getWriter();
        url = getServletContext().getResource(file);
        //set response header
        response.setContentType("text/xml");

        urlConn = url.openConnection();
        //establish connection with URL presenting web.xml
        urlConn.connect();
        buf = new BufferedInputStream(urlConn.getInputStream());
        int readBytes = 0;
        while ((readBytes = buf.read()) != -1)
            out.write(readBytes);
    } catch (MalformedURLException mue) {
        throw new ServletException(mue.getMessage());
    } catch (IOException ioe) {
        throw new ServletException(ioe.getMessage());
    } finally {
        if (out != null)
            out.close();
        if (buf != null)
            buf.close();
    }
}

From source file:ome.system.UpgradeCheck.java

/**
 * If the {@link #url} has been set to null or the empty string, then no
 * upgrade check will be performed (silently). If however the string is an
 * invalid URL, a warning will be printed.
 *
 * This method should <em>never</em> throw an exception.
 */
public void run() {

    // If null or empty, the upgrade check is disabled.
    if (url == null || url.length() == 0) {
        return; // EARLY EXIT!
    }

    StringBuilder query = new StringBuilder();
    try {
        query.append(url);
        query.append("?version=");
        query.append(URLEncoder.encode(version, "UTF-8"));
        query.append(";os.name=");
        query.append(URLEncoder.encode(System.getProperty("os.name"), "UTF-8"));
        query.append(";os.arch=");
        query.append(URLEncoder.encode(System.getProperty("os.arch"), "UTF-8"));
        query.append(";os.version=");
        query.append(URLEncoder.encode(System.getProperty("os.version"), "UTF-8"));
        query.append(";java.runtime.version=");
        query.append(URLEncoder.encode(System.getProperty("java.runtime.version"), "UTF-8"));
        query.append(";java.vm.vendor=");
        query.append(URLEncoder.encode(System.getProperty("java.vm.vendor"), "UTF-8"));
    } catch (UnsupportedEncodingException uee) {
        // Internal issue
        set(null, uee);
        return;
    }

    URL _url;
    try {
        _url = new URL(query.toString());
    } catch (Exception e) {
        set(null, e);
        log.error("Invalid URL: " + query.toString());
        return;
    }

    BufferedInputStream bufIn = null;
    try {
        URLConnection conn = _url.openConnection();
        conn.setUseCaches(false);
        conn.addRequestProperty("User-Agent", agent);
        conn.setConnectTimeout(timeout);
        conn.setReadTimeout(timeout);
        conn.connect();

        log.debug("Attempting to connect to " + query);

        InputStream in = conn.getInputStream();
        bufIn = new BufferedInputStream(in);

        StringBuilder sb = new StringBuilder();
        while (true) {
            int data = bufIn.read();
            if (data == -1) {
                break;
            } else {
                sb.append((char) data);
            }
        }
        String result = sb.toString();
        if (result.length() == 0) {
            log.info("no update needed");
            set(null, null);
        } else {
            log.warn("UPGRADE AVAILABLE:" + result);
            set(result, null);
        }
    } catch (UnknownHostException uhe) {
        log.error("Unknown host:" + url);
        set(null, uhe);
    } catch (IOException ioe) {
        log.error(String.format("Error reading from url: %s \"%s\"", query, ioe.getMessage()));
        set(null, ioe);
    } catch (Exception ex) {
        log.error("Unknown exception thrown on UpgradeCheck", ex);
        set(null, ex);
    } finally {
        Utils.closeQuietly(bufIn);
    }
}