List of usage examples for the java.net.URLConnection.connect() method.
public abstract void connect() throws IOException;
From source file:net.sf.eclipsecs.core.config.configtypes.ConfigurationType.java
/**
 * {@inheritDoc}
 *
 * Resolves the configured Checkstyle configuration file URL, reads the file's
 * bytes and last-modified timestamp over a URLConnection, and bundles them with
 * the additional property bundle and a property resolver into a single holder.
 *
 * @param checkConfiguration the check configuration to load the file for
 * @return the assembled {@link CheckstyleConfigurationFile} data holder
 * @throws CheckstylePluginException if the configuration file cannot be read
 *         (IOExceptions are rethrown wrapped via CheckstylePluginException.rethrow)
 */
public CheckstyleConfigurationFile getCheckstyleConfiguration(ICheckConfiguration checkConfiguration)
        throws CheckstylePluginException {
    CheckstyleConfigurationFile data = new CheckstyleConfigurationFile();

    try {
        // resolve the true configuration file URL
        data.setResolvedConfigFileURL(resolveLocation(checkConfiguration));

        URLConnection connection = data.getResolvedConfigFileURL().openConnection();
        connection.connect();

        // get last modification timestamp
        data.setModificationStamp(connection.getLastModified());

        // get the configuration file data
        // NOTE(review): whether getBytesFromURLConnection closes the connection's
        // stream is not visible here — confirm to rule out a leak.
        byte[] configurationFileData = getBytesFromURLConnection(connection);
        data.setCheckConfigFileBytes(configurationFileData);

        // get the properties bundle
        byte[] additionalPropertiesBytes = getAdditionPropertiesBundleBytes(data.getResolvedConfigFileURL());
        data.setAdditionalPropertyBundleBytes(additionalPropertiesBytes);

        // get the property resolver
        PropertyResolver resolver = getPropertyResolver(checkConfiguration, data);
        data.setPropertyResolver(resolver);
    } catch (IOException e) {
        CheckstylePluginException.rethrow(e);
    }

    return data;
}
From source file:ubic.gemma.core.loader.expression.geo.service.GeoBrowser.java
/**
 * Retrieves and parses the tab-delimited file from GEO. The file contains pageSize GEO records
 * starting from startPage.
 *
 * @param startPage start page
 * @param pageSize page size
 * @return list of GeoRecords
 * @throws IOException if there is a problem while manipulating the file, or if the response is empty
 * @throws ParseException if there is a parsing problem
 */
public List<GeoRecord> getRecentGeoRecords(int startPage, int pageSize) throws IOException, ParseException {

    if (startPage < 0 || pageSize < 0)
        throw new IllegalArgumentException("Values must be greater than zero ");

    List<GeoRecord> records = new ArrayList<>();
    URL url;
    try {
        url = new URL(GEO_BROWSE_URL + startPage + GEO_BROWSE_SUFFIX + pageSize);
    } catch (MalformedURLException e) {
        throw new RuntimeException("Invalid URL: " + GEO_BROWSE_URL + startPage + GEO_BROWSE_SUFFIX + pageSize,
                e);
    }

    URLConnection conn = url.openConnection();
    conn.connect();

    try (InputStream is = conn.getInputStream();
            BufferedReader br = new BufferedReader(new InputStreamReader(is))) {

        // We are getting a tab delimited file.

        // Read columns headers.
        String headerLine = br.readLine();
        // BUGFIX: an empty response previously caused an NPE inside csvSplit;
        // fail with a descriptive IOException instead.
        if (headerLine == null) {
            throw new IOException("Got an empty response from: " + url);
        }
        String[] headers = StringUtil.csvSplit(headerLine);

        // Map column names to their indices (handy later).
        Map<String, Integer> columnNameToIndex = new HashMap<>();
        for (int i = 0; i < headers.length; i++) {
            columnNameToIndex.put(headers[i], i);
        }

        // Read the rest of the file: one GEO record per line.
        String line;
        while ((line = br.readLine()) != null) {
            String[] fields = StringUtil.csvSplit(line);

            GeoRecord geoRecord = new GeoRecord();
            geoRecord.setGeoAccession(fields[columnNameToIndex.get("Accession")]);
            geoRecord.setTitle(StringUtils.strip(
                    fields[columnNameToIndex.get("Title")].replaceAll(GeoBrowser.FLANKING_QUOTES_REGEX, "")));

            String sampleCountS = fields[columnNameToIndex.get("Sample Count")];
            if (StringUtils.isNotBlank(sampleCountS)) {
                try {
                    geoRecord.setNumSamples(Integer.parseInt(sampleCountS));
                } catch (NumberFormatException e) {
                    throw new RuntimeException("Could not parse sample count: " + sampleCountS);
                }
            } else {
                GeoBrowser.log.warn("No sample count for " + geoRecord.getGeoAccession());
            }

            geoRecord.setContactName(
                    fields[columnNameToIndex.get("Contact")].replaceAll(GeoBrowser.FLANKING_QUOTES_REGEX, ""));

            String[] taxons = fields[columnNameToIndex.get("Taxonomy")]
                    .replaceAll(GeoBrowser.FLANKING_QUOTES_REGEX, "").split(";");
            geoRecord.getOrganisms().addAll(Arrays.asList(taxons));

            Date date = DateUtils.parseDate(fields[columnNameToIndex.get("Release Date")]
                    .replaceAll(GeoBrowser.FLANKING_QUOTES_REGEX, ""), DATE_FORMATS);
            geoRecord.setReleaseDate(date);

            geoRecord.setSeriesType(fields[columnNameToIndex.get("Series Type")]);

            records.add(geoRecord);
        }
    }

    if (records.isEmpty()) {
        GeoBrowser.log.warn("No records obtained");
    }
    return records;
}
From source file:com.cellbots.communication.CustomHttpCommChannel.java
/**
 * Starts a background thread that polls {@code inUrl} for messages and delivers
 * each new message to {@code mMessageListener}. Polling repeats every
 * {@code waitTimeBetweenPolling} ms until {@code stopReading} is set; a negative
 * wait time means "fetch once, then stop".
 *
 * @param waitTimeBetweenPolling delay in ms between polls; negative = single fetch
 * @param returnStream if true, deliver the raw InputStream; otherwise read one
 *        line and deliver it as a command string
 */
@Override
public void listenForMessages(final long waitTimeBetweenPolling, final boolean returnStream) {
    // Nothing to listen to, or channel already shutting down.
    if (inUrl == null || doDisconnect)
        return;
    stopReading = false;
    listenThread = new Thread(new Runnable() {
        @Override
        public void run() {
            Looper.prepare();
            while (!stopReading) {
                try {
                    if (waitTimeBetweenPolling >= 0)
                        Thread.sleep(waitTimeBetweenPolling);
                    // Lazily create and cache the URL object on first use.
                    if (commandUrl == null) {
                        commandUrl = new URL(inUrl);
                    }
                    URLConnection cn = commandUrl.openConnection();
                    cn.connect();
                    if (returnStream) {
                        // Hand the raw stream to the listener; the listener owns closing it.
                        if (mMessageListener != null) {
                            mMessageListener.onMessage(new CommMessage(null, cn.getInputStream(), null, null,
                                    null, mChannelName, CommunicationManager.CHANNEL_HTTP));
                        }
                    } else {
                        // NOTE(review): this reader is never closed; the underlying
                        // connection stream leaks on every poll — consider closing it.
                        BufferedReader rd = new BufferedReader(new InputStreamReader(cn.getInputStream()),
                                1024);
                        String cmd = rd.readLine();
                        // Only deliver when the command changed since the last poll.
                        if (cmd != null && !cmd.equals(prevCmd) && mMessageListener != null) {
                            prevCmd = cmd;
                            // TODO (chaitanyag): Change this after we come
                            // up with a better protocol. The first (space
                            // separated) token in the command string could
                            // be a timestamp. This is useful if the same
                            // commands are sent back to back. For example,
                            // the controller sends consecutive "hu"
                            // (head up) commands to tilt the head up in
                            // small increments.
                            if (cmd.indexOf(' ') >= 0) {
                                try {
                                    // Strip a leading numeric timestamp token, if present.
                                    Long.parseLong(cmd.substring(0, cmd.indexOf(' ')));
                                    cmd = cmd.substring(cmd.indexOf(' ') + 1);
                                } catch (NumberFormatException e) {
                                    // First token is not a number: treat whole line as the command.
                                }
                            }
                            mMessageListener.onMessage(new CommMessage(cmd, null, null, null, null,
                                    mChannelName, CommunicationManager.CHANNEL_HTTP));
                        }
                    }
                    if (waitTimeBetweenPolling < 0) {
                        // Do not repeat this loop
                        break;
                    }
                } catch (MalformedURLException e) {
                    Log.e(TAG, "Error processing URL: " + e.getMessage());
                } catch (IOException e) {
                    Log.e(TAG, "Error reading command from URL: " + commandUrl + " : " + e.getMessage());
                } catch (InterruptedException e) {
                    // NOTE(review): interrupt status is swallowed here; consider
                    // Thread.currentThread().interrupt() — confirm intended behavior.
                    e.printStackTrace();
                }
            }
        }
    });
    listenThread.start();
}
From source file:dk.netarkivet.common.distribute.HTTPRemoteFile.java
/** * Invalidate all file handles, by asking the remote registry to remove the * url for this remote file from the list of shared files. * Invalidating a file handle may delete the original files, if deletable. * This method does not throw exceptions, but will warn on errors. *///from w ww.j a v a 2 s. c o m public void cleanup() { if (filesize == 0) { return; } try { URLConnection urlConnection = getRegistry().openConnection(getRegistry().getCleanupUrl(url)); urlConnection.setUseCaches(false); urlConnection.connect(); urlConnection.getInputStream(); } catch (IOException e) { log.warn("Unable to cleanup file '" + file.getAbsolutePath() + "' with URL'" + url + "'", e); } }
From source file:com.nextgis.mobile.util.ApkDownloader.java
@Override protected String doInBackground(String... params) { try {//w ww. ja v a 2s. com URL url = new URL(params[0]); mApkPath = Environment.getExternalStorageDirectory() + "/download/" + Uri.parse(params[0]).getLastPathSegment(); URLConnection connection = url.openConnection(); if (url.getProtocol().equalsIgnoreCase("https")) connection.setRequestProperty("Authorization", params[1]); connection.connect(); int fileLength = connection.getContentLength() / 1024; InputStream input = new BufferedInputStream(connection.getInputStream()); OutputStream output = new FileOutputStream(mApkPath); byte data[] = new byte[1024]; long total = 0; int count; while ((count = input.read(data)) != -1) { total += count; publishProgress((int) total / 1024, fileLength); output.write(data, 0, count); } output.flush(); output.close(); input.close(); return null; } catch (MalformedURLException e) { return mActivity.getString(R.string.error_invalid_url); } catch (IOException e) { return mActivity.getString(R.string.error_network_unavailable); } }
From source file:net.hockeyapp.android.internal.CheckUpdateTask.java
@Override protected JSONArray doInBackground(String... args) { try {//from w w w. j av a 2s . co m int versionCode = getVersionCode(); JSONArray json = new JSONArray(VersionCache.getVersionInfo(activity)); if ((getCachingEnabled()) && (findNewVersion(json, versionCode))) { return json; } URL url = new URL(getURLString("json")); URLConnection connection = createConnection(url); connection.connect(); InputStream inputStream = new BufferedInputStream(connection.getInputStream()); String jsonString = convertStreamToString(inputStream); inputStream.close(); json = new JSONArray(jsonString); if (findNewVersion(json, versionCode)) { return json; } } catch (Exception e) { e.printStackTrace(); } return null; }
From source file:me.hqm.plugindev.wget.WGCommand.java
/** * Get a URL from an input string.// w ww .j a v a2 s .c o m * * @param input URL String * @return Valid URL */ private URL getUrl(String input) { // We only accept jar or zip files if (input.endsWith(".jar") || input.endsWith(".zip")) { // Try to create the object, and test the connection try { URI uri = new URI(input); URL url = uri.toURL(); URLConnection conn = url.openConnection(); conn.connect(); // Return the url return url; } catch (IOException | URISyntaxException ignored) { } } // Failed, return null return null; }
From source file:ubic.gemma.core.loader.expression.geo.service.GeoBrowser.java
/**
 * Performs an E-utilities query of the GEO database with the given searchTerms. Returns at most pageSize records
 * (if found) starting at record #start.
 *
 * Two requests are made: an ESearch call that yields a record count plus a
 * history key/WebEnv cookie, then an EFetch call that retrieves the document
 * summaries, which are picked apart with XPath.
 *
 * @param start start
 * @param pageSize page size
 * @param searchTerms search terms
 * @return list of GeoRecords
 * @throws IOException if there is a problem while manipulating the file
 */
public List<GeoRecord> getGeoRecordsBySearchTerm(String searchTerms, int start, int pageSize)
        throws IOException, RuntimeException {

    List<GeoRecord> records = new ArrayList<>();
    URL searchUrl = new URL(
            GeoBrowser.ESEARCH + searchTerms + "&retstart=" + start + "&retmax=" + pageSize + "&usehistory=y");

    Document searchDocument;
    URLConnection conn = searchUrl.openConnection();
    conn.connect();
    try (InputStream is = conn.getInputStream()) {
        GeoBrowser.docFactory.setIgnoringComments(true);
        GeoBrowser.docFactory.setValidating(false);
        DocumentBuilder builder = GeoBrowser.docFactory.newDocumentBuilder();
        searchDocument = builder.parse(is);
    } catch (ParserConfigurationException | SAXException e) {
        throw new RuntimeException(e);
    }

    // Total hit count — used only to fail fast when the search matched nothing.
    NodeList countNode = searchDocument.getElementsByTagName("Count");
    Node countEl = countNode.item(0);
    int count;
    try {
        count = Integer.parseInt(XMLUtils.getTextValue((Element) countEl));
    } catch (NumberFormatException e) {
        throw new IOException("Could not parse count from: " + searchUrl);
    }
    if (count == 0)
        throw new IOException("Got no records from: " + searchUrl);

    // E-utilities history handles for the follow-up fetch request.
    NodeList qnode = searchDocument.getElementsByTagName("QueryKey");
    Element queryIdEl = (Element) qnode.item(0);
    NodeList cknode = searchDocument.getElementsByTagName("WebEnv");
    Element cookieEl = (Element) cknode.item(0);

    String queryId = XMLUtils.getTextValue(queryIdEl);
    String cookie = XMLUtils.getTextValue(cookieEl);

    URL fetchUrl = new URL(GeoBrowser.EFETCH + "&mode=mode.text" + "&query_key=" + queryId + "&retstart="
            + start + "&retmax=" + pageSize + "&WebEnv=" + cookie);

    conn = fetchUrl.openConnection();
    conn.connect();

    Document summaryDocument;
    try (InputStream is = conn.getInputStream()) {
        DocumentBuilder builder = GeoBrowser.docFactory.newDocumentBuilder();
        summaryDocument = builder.parse(is);

        XPathFactory xFactory = XPathFactory.newInstance();
        XPath xpath = xFactory.newXPath();

        // Get relevant data from the XML file
        XPathExpression xaccession = xpath.compile("//DocSum/Item[@Name='GSE']");
        XPathExpression xtitle = xpath.compile("//DocSum/Item[@Name='title']");
        XPathExpression xnumSamples = xpath.compile("//DocSum/Item[@Name='n_samples']");
        XPathExpression xreleaseDate = xpath.compile("//DocSum/Item[@Name='PDAT']");
        XPathExpression xorganisms = xpath.compile("//DocSum/Item[@Name='taxon']");

        Object accessions = xaccession.evaluate(summaryDocument, XPathConstants.NODESET);
        NodeList accNodes = (NodeList) accessions;

        Object titles = xtitle.evaluate(summaryDocument, XPathConstants.NODESET);
        NodeList titleNodes = (NodeList) titles;

        Object samples = xnumSamples.evaluate(summaryDocument, XPathConstants.NODESET);
        NodeList sampleNodes = (NodeList) samples;

        Object dates = xreleaseDate.evaluate(summaryDocument, XPathConstants.NODESET);
        NodeList dateNodes = (NodeList) dates;

        Object organisms = xorganisms.evaluate(summaryDocument, XPathConstants.NODESET);
        NodeList orgnNodes = (NodeList) organisms;

        // Create GeoRecords using information parsed from XML file
        // NOTE(review): the node lists are indexed in lock-step; this assumes every
        // DocSum carries all five items — confirm against the E-utilities schema.
        for (int i = 0; i < accNodes.getLength(); i++) {
            GeoRecord record = new GeoRecord();
            record.setGeoAccession("GSE" + accNodes.item(i).getTextContent());
            record.setTitle(titleNodes.item(i).getTextContent());
            record.setNumSamples(Integer.parseInt(sampleNodes.item(i).getTextContent()));
            Date date = DateUtil.convertStringToDate("yyyy/MM/dd", dateNodes.item(i).getTextContent());
            record.setReleaseDate(date);
            record.setOrganisms(this.getTaxonCollection(orgnNodes.item(i).getTextContent()));
            records.add(record);
        }
        if (records.isEmpty()) {
            GeoBrowser.log.warn("No records obtained");
        }
    } catch (ParserConfigurationException | ParseException | XPathExpressionException | SAXException e) {
        throw new IOException("Could not parse data: " + searchUrl, e);
    }
    return records;
}
From source file:ubic.gemma.loader.expression.geo.service.GeoBrowser.java
/**
 * Retrieves and parses tab delimited file from GEO. File contains pageSize
 * GEO records starting from startPage.
 *
 * @param startPage start page
 * @param pageSize page size
 * @return list of GeoRecords
 * @throws IOException if there is a problem while manipulating the file
 * @throws ParseException if a record's release date cannot be parsed
 */
public List<GeoRecord> getRecentGeoRecords(int startPage, int pageSize) throws IOException, ParseException {

    if (startPage < 0 || pageSize < 0)
        throw new IllegalArgumentException("Values must be greater than zero ");

    List<GeoRecord> records = new ArrayList<GeoRecord>();
    URL url;
    try {
        url = new URL(GEO_BROWSE_URL + startPage + GEO_BROWSE_SUFFIX + pageSize);
    } catch (MalformedURLException e) {
        // BUGFIX: the message previously interpolated the url variable, which is
        // always null at this point ("Invalid URL null"); report the actual string.
        throw new RuntimeException("Invalid URL " + GEO_BROWSE_URL + startPage + GEO_BROWSE_SUFFIX + pageSize,
                e);
    }

    InputStream is;
    try {
        URLConnection conn = url.openConnection();
        conn.connect();
        is = conn.getInputStream();
    } catch (IOException e) {
        log.error(e, e);
        throw e;
    }

    // BUGFIX: try-with-resources closes the reader (and stream) even when parsing
    // throws; previously is.close() was skipped on any exception and the
    // BufferedReader was never closed at all.
    try (BufferedReader br = new BufferedReader(new InputStreamReader(is))) {
        // We are getting a tab delimited file.

        // Read columns headers.
        String headerLine = br.readLine();
        if (headerLine == null) {
            throw new IOException("Got an empty response from: " + url);
        }
        String[] headers = StringUtil.csvSplit(headerLine);

        // Map column names to their indices (handy later).
        Map<String, Integer> columnNameToIndex = new HashMap<String, Integer>();
        for (int i = 0; i < headers.length; i++) {
            columnNameToIndex.put(headers[i], i);
        }

        // Read the rest of the file: one GEO record per line.
        String line = null;
        while ((line = br.readLine()) != null) {
            String[] fields = StringUtil.csvSplit(line);

            GeoRecord geoRecord = new GeoRecord();
            geoRecord.setGeoAccession(fields[columnNameToIndex.get("Accession")]);
            geoRecord.setTitle(StringUtils
                    .strip(fields[columnNameToIndex.get("Title")].replaceAll(FLANKING_QUOTES_REGEX, "")));

            String sampleCountS = fields[columnNameToIndex.get("Sample Count")];
            if (StringUtils.isNotBlank(sampleCountS)) {
                try {
                    geoRecord.setNumSamples(Integer.parseInt(sampleCountS));
                } catch (NumberFormatException e) {
                    throw new RuntimeException("Could not parse sample count: " + sampleCountS);
                }
            } else {
                log.warn("No sample count for " + geoRecord.getGeoAccession());
            }

            geoRecord.setContactName(
                    fields[columnNameToIndex.get("Contact")].replaceAll(FLANKING_QUOTES_REGEX, ""));

            String[] taxons = fields[columnNameToIndex.get("Taxonomy")].replaceAll(FLANKING_QUOTES_REGEX, "")
                    .split(";");
            geoRecord.getOrganisms().addAll(Arrays.asList(taxons));

            Date date = DateUtils.parseDate(
                    fields[columnNameToIndex.get("Release Date")].replaceAll(FLANKING_QUOTES_REGEX, ""),
                    DATE_FORMATS);
            geoRecord.setReleaseDate(date);

            geoRecord.setSeriesType(fields[columnNameToIndex.get("Series Type")]);

            records.add(geoRecord);
        }
    }

    if (records.isEmpty()) {
        log.warn("No records obtained");
    }
    return records;
}
From source file:xtuaok.sharegyazo.HttpMultipartPostRequest.java
/**
 * Uploads the image bytes in {@code mByteData} to the CGI endpoint {@code mCgi}
 * as a multipart/form-data POST and returns the server's response body.
 *
 * @return the response body as a string, or null when the request failed
 */
public String send() {
    URLConnection conn = null;
    String res = null;
    try {
        conn = new URL(mCgi).openConnection();
        // Multipart request: parts are separated by BOUNDARY.
        conn.setRequestProperty("Content-Type", "multipart/form-data; boundary=" + BOUNDARY);
        ((HttpURLConnection) conn).setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.connect();

        // Write the single "imagedata" part followed by the closing boundary.
        OutputStream os = conn.getOutputStream();
        os.write(createBoundaryMessage("imagedata").getBytes());
        os.write(mByteData);
        String endBoundary = "\r\n--" + BOUNDARY + "--\r\n";
        os.write(endBoundary.getBytes());
        os.close();

        InputStream is = conn.getInputStream();
        res = convertToString(is);
    } catch (Exception e) {
        // Best-effort upload: log and fall through to return null.
        // ("+ \"\"" guards against a null exception message.)
        Log.d(LOG_TAG, e.getMessage() + "");
    } finally {
        // Always release the underlying connection.
        if (conn != null) {
            ((HttpURLConnection) conn).disconnect();
        }
    }
    return res;
}