List of usage examples for java.net.URISyntaxException.printStackTrace()
public void printStackTrace()
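Before the real-world examples below, here is a minimal, self-contained sketch of the shared pattern: construct a java.net.URI inside a try block and call printStackTrace() on the URISyntaxException when parsing fails. The class name and the malformed URI string are illustrative only and do not come from the examples that follow.

    import java.net.URI;
    import java.net.URISyntaxException;

    public class UriSyntaxExample {
        public static void main(String[] args) {
            // A malformed URI: the unencoded space is illegal and triggers URISyntaxException.
            String candidate = "http://example.com/bad path";
            try {
                URI uri = new URI(candidate);
                System.out.println("Parsed: " + uri);
            } catch (URISyntaxException e) {
                // Prints the parse failure (reason, input and error index) with the stack trace.
                e.printStackTrace();
            }
        }
    }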
From source file:com.cloud.network.resource.NccHttpCode.java
private ExternalNetworkResourceUsageAnswer getPublicIpBytesSentAndReceived(ExternalNetworkResourceUsageCommand cmd)
        throws ExecutionException {
    ExternalNetworkResourceUsageAnswer answer = new ExternalNetworkResourceUsageAnswer(cmd);
    long networkid = cmd.getNetworkid();
    try {
        //TODO send GET cmd to get the network stats
        URI agentUri = null;
        String response = null;
        try {
            agentUri = new URI("https", null, _ip, DEFAULT_PORT,
                    "/cs/adcaas/v1/networks/" + networkid + "/ipStats", null, null);
            org.json.JSONObject jsonBody = new JSONObject();
            response = getHttpRequest(jsonBody.toString(), agentUri, _sessionid);
            JSONArray statsIPList = null;
            if (response != null) {
                statsIPList = new JSONObject(response).getJSONObject("stats").getJSONArray("ipBytes");
            }
            if (statsIPList != null) {
                for (int i = 0; i < statsIPList.length(); i++) {
                    JSONObject ipstat = statsIPList.getJSONObject(i);
                    JSONObject ipvalues = ipstat.getJSONObject("ipstats");
                    if (ipstat != null) {
                        long[] bytesSentAndReceived = new long[] { 0, 0 };
                        bytesSentAndReceived[0] = ipvalues.getLong("received");
                        bytesSentAndReceived[1] = ipvalues.getLong("sent");
                        if (bytesSentAndReceived[0] >= 0 && bytesSentAndReceived[1] >= 0) {
                            answer.ipBytes.put(ipstat.getString("ip"), bytesSentAndReceived);
                        }
                    }
                }
            }
            s_logger.debug("IPStats Response :" + response);
        } catch (URISyntaxException e) {
            e.printStackTrace();
        } catch (ExecutionException e) {
            s_logger.debug("Session alive: " + e.getMessage());
            e.printStackTrace();
        }
    } catch (Exception e) {
        s_logger.error("Failed to get bytes sent and received statistics due to " + e);
        throw new ExecutionException(e.getMessage());
    }
    return answer;
}
From source file:com.appdynamics.demo.gasp.service.RESTIntentService.java
@Override
protected void onHandleIntent(Intent intent) {
    Uri action = intent.getData();
    Bundle extras = intent.getExtras();
    if (extras == null || action == null || !extras.containsKey(EXTRA_RESULT_RECEIVER)) {
        Log.e(TAG, "You did not pass extras or data with the Intent.");
        return;
    }
    int verb = extras.getInt(EXTRA_HTTP_VERB, GET);
    Bundle params = extras.getParcelable(EXTRA_PARAMS);
    Bundle headers = extras.getParcelable(EXTRA_HEADERS);
    ResultReceiver receiver = extras.getParcelable(EXTRA_RESULT_RECEIVER);
    try {
        HttpRequestBase request = null;
        // Get query params from Bundle and build URL
        switch (verb) {
        case GET: {
            request = new HttpGet();
            attachUriWithQuery(request, action, params);
        }
            break;
        case DELETE: {
            request = new HttpDelete();
            attachUriWithQuery(request, action, params);
        }
            break;
        case POST: {
            request = new HttpPost();
            request.setURI(new URI(action.toString()));
            HttpPost postRequest = (HttpPost) request;
            if (params != null) {
                UrlEncodedFormEntity formEntity = new UrlEncodedFormEntity(paramsToList(params));
                postRequest.setEntity(formEntity);
            }
        }
            break;
        case PUT: {
            request = new HttpPut();
            request.setURI(new URI(action.toString()));
            HttpPut putRequest = (HttpPut) request;
            if (params != null) {
                UrlEncodedFormEntity formEntity = new UrlEncodedFormEntity(paramsToList(params));
                putRequest.setEntity(formEntity);
            }
        }
            break;
        }
        // Get Headers from Bundle
        for (BasicNameValuePair header : paramsToList(headers)) {
            request.setHeader(header.getName(), header.getValue());
        }
        if (request != null) {
            HttpClient client = new DefaultHttpClient();
            Log.d(TAG, "Executing request: " + verbToString(verb) + ": " + action.toString());
            HttpResponse response = client.execute(request);
            HttpEntity responseEntity = response.getEntity();
            StatusLine responseStatus = response.getStatusLine();
            int statusCode = responseStatus != null ? responseStatus.getStatusCode() : 0;
            if ((responseEntity != null) && (responseStatus.getStatusCode() == 200)) {
                Bundle resultData = new Bundle();
                resultData.putString(REST_RESULT, EntityUtils.toString(responseEntity));
                receiver.send(statusCode, resultData);
            } else {
                receiver.send(statusCode, null);
            }
        }
    } catch (URISyntaxException e) {
        Log.e(TAG, "URI syntax was incorrect. " + verbToString(verb) + ": " + action.toString(), e);
        receiver.send(0, null);
    } catch (UnsupportedEncodingException e) {
        Log.e(TAG, "A UrlEncodedFormEntity was created with an unsupported encoding.", e);
        receiver.send(0, null);
    } catch (ClientProtocolException e) {
        Log.e(TAG, "There was a problem when sending the request.", e);
        receiver.send(0, null);
    } catch (IOException e) {
        Log.e(TAG, "There was a problem when sending the request.", e);
        receiver.send(0, null);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
From source file:com.ximai.savingsmore.save.activity.AddGoodsAcitivyt.java
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent intent) {
    super.onActivityResult(requestCode, resultCode, intent);
    if (resultCode != RESULT_OK) {
        return;
    } else if (requestCode == PICK_FROM_CAMERA || requestCode == PICK_FROM_IMAGE) {
        Uri uri = null;
        if (null != intent && intent.getData() != null) {
            uri = intent.getData();
        } else {
            String fileName = PreferencesUtils.getString(this, "tempName");
            uri = Uri.fromFile(new File(FileSystem.getCachesDir(this, true).getAbsolutePath(), fileName));
        }
        if (uri != null) {
            cropImage(uri, CROP_PHOTO_CODE);
        }
    } else if (requestCode == CROP_PHOTO_CODE) {
        Uri photoUri = intent.getParcelableExtra(MediaStore.EXTRA_OUTPUT);
        try {
            upLoadImage(new File(new URI(photoUri.toString())), "BusinessLicense");
        } catch (URISyntaxException e) {
            e.printStackTrace();
        }
        //addImage(imagePath);
    }
}
From source file:com.appfirst.communication.AFClient.java
/**
 * Update the badge information for a specific mobile device. Used to reset
 * the unread messages count.
 *
 * @param url
 *            the query url
 * @param count
 *            new badge count, which is 0 in this case.
 * @param uid
 *            unique identifier of the device, provided by Google C2DM.
 * @return the updated device information.
 */
public JSONObject updateDeviceBadge(String url, int count, String uid) {
    JSONObject jsonObject = null;
    HttpPut request = null;
    String params = String.format("?badge=%d&uid=%s", count, uid);
    try {
        request = new HttpPut(new URI(url + params));
    } catch (URISyntaxException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    jsonObject = this.makeJsonObjectPutRequest(request);
    return jsonObject;
}
From source file:com.appfirst.communication.AFClient.java
/**
 * Gets a list of polled data points.
 *
 * @param url
 *            the public api address of the query
 * @param number
 *            the number of points to retrieve
 * @param start
 *            don't retrieve any points before this date, if given.
 * @param end
 *            retrieve data from this timestamp backwards. If not given, it
 *            gets the most recent data.
 * @return a list of {@link PolledDataData} objects.
 */
public List<PolledDataData> getPollDataDataList(String url, long start, long end, int number) {
    JSONArray dataObject = null;
    URI uri = null;
    try {
        uri = new URI(String.format("%s?num=%d", url, number));
    } catch (URISyntaxException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    HttpGet getRequest = new HttpGet(uri);
    dataObject = makeJsonArrayRequest(getRequest);
    return Helper.convertPolledDataDataList(dataObject);
}
From source file:com.appfirst.communication.AFClient.java
/**
 * Gets a list of polled data points.
 *
 * @param url
 *            the public api address of the query
 * @param number
 *            the number of points to retrieve
 * @return a list of {@link PolledDataData} objects.
 */
public List<PolledDataData> getPollDataDataList(String url, int number) {
    JSONArray dataObject = null;
    String params = String.format("?num=%d", number);
    HttpGet getRequest = null;
    try {
        getRequest = new HttpGet(new URI(url + params));
    } catch (URISyntaxException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    dataObject = makeJsonArrayRequest(getRequest);
    return Helper.convertPolledDataDataList(dataObject);
}
From source file:com.appfirst.communication.AFClient.java
/**
 * Change the alert status to be either active or inactive.
 *
 * @param url
 *            query url
 * @param id
 *            the id of the alert
 * @param active
 *            Boolean value indicating whether the alert is active or not.
 * @return the modified Alert object.
 */
public Alert updateAlertStatus(String url, int id, Boolean active) {
    JSONObject jsonObject = new JSONObject();
    HttpPut request = null;
    String params = String.format("?id=%d&active=%s", id, active.toString());
    try {
        request = new HttpPut(new URI(url + params));
    } catch (URISyntaxException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    jsonObject = makeJsonObjectPutRequest(request);
    return new Alert(jsonObject);
}
From source file:org.lexgrid.valuesets.helper.VSDServiceHelper.java
/**
 * Return a string representation of the URIs of all of the coding schemes
 * used in the supplied value domain.
 *
 * @param vdDef
 *            supplied value domain
 * @return List of unique URIs. Returned as strings because we aren't all
 *         that picky about the syntax
 * @throws LBException
 * @throws URISyntaxException
 */
public HashSet<String> getCodingSchemeURIs(ValueSetDefinition vdDef) throws LBException {
    HashSet<String> csRefs = new HashSet<String>();
    if (vdDef != null && vdDef.getDefinitionEntry() != null) {
        // Always add the default coding scheme, even if it isn't used
        if (!StringUtils.isEmpty(vdDef.getDefaultCodingScheme()))
            csRefs.add(getURIForCodingSchemeName(vdDef.getMappings(), vdDef.getDefaultCodingScheme()));
        // Iterate over all of the individual definitions
        Iterator<DefinitionEntry> deIter = vdDef.getDefinitionEntryAsReference().iterator();
        while (deIter.hasNext()) {
            DefinitionEntry de = deIter.next();
            String csName = null;
            if (de.getCodingSchemeReference() != null) {
                csName = de.getCodingSchemeReference().getCodingScheme();
            } else if (de.getEntityReference() != null) {
                String entityNamespaceName = de.getEntityReference().getEntityCodeNamespace();
                if (!StringUtils.isEmpty(entityNamespaceName)) {
                    csName = getCodingSchemeNameForNamespaceName(vdDef.getMappings(), entityNamespaceName);
                }
            } else if (de.getPropertyReference() != null) {
                csName = de.getPropertyReference().getCodingScheme();
            } else if (de.getValueSetDefinitionReference() != null) {
                try {
                    csRefs.addAll(getCodingSchemeURIs(vsds_.getValueSetDefinitionByUri(
                            new URI(de.getValueSetDefinitionReference().getValueSetDefinitionURI()))));
                } catch (URISyntaxException e) {
                    // TODO Decide what to do here - the value domain URI
                    // isn't valid?
                    e.printStackTrace();
                }
            } else {
                assert false : "Invalid value domain definition";
            }
            if (!StringUtils.isEmpty(csName) && !StringUtils.equals(csName, vdDef.getDefaultCodingScheme())) {
                String csURI = getURIForCodingSchemeName(vdDef.getMappings(), csName);
                if (!StringUtils.isEmpty(csURI))
                    csRefs.add(csURI);
            }
        }
    }
    return csRefs;
}
From source file:me.trashout.fragment.TrashReportOrEditFragment.java
private void setPosition(double lat, double lng) {
    trashReportPosition.setText(PositionUtils.getFormattedLocation(getContext(), lat, lng));
    if (trashReportMap.getVisibility() == View.VISIBLE) {
        String mapUrl = PositionUtils.getStaticMapUrl(getActivity(), lat, lng);
        try {
            URI mapUri = new URI(mapUrl.replace("|", "%7c"));
            Log.d(TAG, "setupDumpData: mapUrl = " + String.valueOf(mapUri.toURL()));
            GlideApp.with(this).load(String.valueOf(mapUri.toURL())).centerCrop().dontTransform()
                    .into(trashReportMap);
        } catch (URISyntaxException e) {
            e.printStackTrace();
        } catch (MalformedURLException e) {
            e.printStackTrace();
        }
    }
}
From source file:uk.bl.wa.indexer.WARCIndexer.java
/**
 * This extracts metadata from the ArchiveRecord and creates a suitable SolrRecord.
 * Removes the text field if flag set.
 *
 * @param archiveName
 * @param record
 * @param isTextIncluded
 * @return
 * @throws IOException
 */
public SolrRecord extract(String archiveName, ArchiveRecord record, boolean isTextIncluded) throws IOException {
    final long start = System.nanoTime();
    ArchiveRecordHeader header = record.getHeader();
    SolrRecord solr = solrFactory.createRecord(archiveName, header);
    if (!header.getHeaderFields().isEmpty()) {
        if (header.getHeaderFieldKeys().contains(HEADER_KEY_TYPE)) {
            log.debug("Looking at " + header.getHeaderValue(HEADER_KEY_TYPE));
            if (!checkRecordType((String) header.getHeaderValue(HEADER_KEY_TYPE))) {
                return null;
            }
            // Store WARC record type:
            solr.setField(SolrFields.SOLR_RECORD_TYPE, (String) header.getHeaderValue(HEADER_KEY_TYPE));
            // Store WARC-Record-ID
            solr.setField(SolrFields.WARC_KEY_ID, (String) header.getHeaderValue(HEADER_KEY_ID));
            solr.setField(SolrFields.WARC_IP, (String) header.getHeaderValue(HEADER_KEY_IP));
        } else {
            // else we're processing ARCs so nothing to filter and no
            // revisits
            solr.setField(SolrFields.SOLR_RECORD_TYPE, "arc");
        }
        if (header.getUrl() == null)
            return null;

        // Get the URL:
        String targetUrl = Normalisation.sanitiseWARCHeaderValue(header.getUrl());

        // Strip down very long URLs to avoid
        // "org.apache.commons.httpclient.URIException: Created (escaped)
        // uuri > 2083"
        // Trac #2271: replace string-splitting with URI-based methods.
        if (targetUrl.length() > 2000)
            targetUrl = targetUrl.substring(0, 2000);

        log.debug("Current heap usage: "
                + FileUtils.byteCountToDisplaySize(Runtime.getRuntime().totalMemory()));
        log.debug("Processing " + targetUrl + " from " + archiveName);

        // Check the filters:
        if (this.checkProtocol(targetUrl) == false)
            return null;
        if (this.checkUrl(targetUrl) == false)
            return null;
        if (this.checkExclusionFilter(targetUrl) == false)
            return null;

        // -----------------------------------------------------
        // Add user supplied Archive-It Solr fields and values:
        // -----------------------------------------------------
        solr.setField(SolrFields.INSTITUTION, WARCIndexerCommand.institution);
        solr.setField(SolrFields.COLLECTION, WARCIndexerCommand.collection);
        solr.setField(SolrFields.COLLECTION_ID, WARCIndexerCommand.collection_id);

        // --- Basic headers ---

        // Basic metadata:
        solr.setField(SolrFields.SOURCE_FILE, archiveName);
        solr.setField(SolrFields.SOURCE_FILE_OFFSET, "" + header.getOffset());
        String filePath = header.getReaderIdentifier(); // Full path of file
        // Will convert windows path to linux path. Linux paths will not be modified.
        String linuxFilePath = FilenameUtils.separatorsToUnix(filePath);
        solr.setField(SolrFields.SOURCE_FILE_PATH, linuxFilePath);
        byte[] url_md5digest = md5
                .digest(Normalisation.sanitiseWARCHeaderValue(header.getUrl()).getBytes("UTF-8"));
        // String url_base64 =
        // Base64.encodeBase64String(fullUrl.getBytes("UTF-8"));
        String url_md5hex = Base64.encodeBase64String(url_md5digest);
        solr.setField(SolrFields.SOLR_URL, Normalisation.sanitiseWARCHeaderValue(header.getUrl()));
        if (addNormalisedURL) {
            solr.setField(SolrFields.SOLR_URL_NORMALISED, Normalisation.canonicaliseURL(targetUrl));
        }

        // Get the length, but beware, this value also includes the HTTP headers (i.e. it is the payload_length):
        long content_length = header.getLength();

        // Also pull out the file extension, if any:
        String resourceName = parseResourceName(targetUrl);
        solr.addField(SolrFields.RESOURCE_NAME, resourceName);
        solr.addField(SolrFields.CONTENT_TYPE_EXT, parseExtension(resourceName));

        // Add URL-based fields:
        URI saneURI = parseURL(solr, targetUrl);

        // Prepare crawl date information:
        String waybackDate = (header.getDate().replaceAll("[^0-9]", ""));
        Date crawlDate = getWaybackDate(waybackDate);

        // Store the dates:
        solr.setField(SolrFields.CRAWL_DATE, formatter.format(crawlDate));
        solr.setField(SolrFields.CRAWL_YEAR, getYearFromDate(crawlDate));

        // Use the current value as the waybackDate:
        solr.setField(SolrFields.WAYBACK_DATE, waybackDate);

        Instrument.timeRel("WARCIndexer.extract#total", "WARCIndexer.extract#archeaders", start);

        // -----------------------------------------------------
        // Now consume record and HTTP headers (only)
        // -----------------------------------------------------
        InputStream tikainput = null;

        // Only parse HTTP headers for HTTP URIs
        if (targetUrl.startsWith("http")) {
            // Parse HTTP headers:
            String statusCode = null;
            if (record instanceof WARCRecord) {
                statusCode = this.processWARCHeaders(record, header, targetUrl, solr);
                tikainput = record;
            } else if (record instanceof ARCRecord) {
                ARCRecord arcr = (ARCRecord) record;
                statusCode = "" + arcr.getStatusCode();
                this.processHeaders(solr, statusCode, arcr.getHttpHeaders(), targetUrl);
                arcr.skipHttpHeader();
                tikainput = arcr;
            } else {
                log.error("FAIL! Unsupported archive record type.");
                return solr;
            }

            solr.setField(SolrFields.SOLR_STATUS_CODE, statusCode);

            // Skip recording non-content URLs (i.e. 2xx responses only please):
            if (!checkResponseCode(statusCode)) {
                log.debug("Skipping this record based on status code " + statusCode + ": " + targetUrl);
                return null;
            }
        } else {
            log.info("Skipping header parsing as URL does not start with 'http'");
        }

        // -----------------------------------------------------
        // Headers have been processed, payload ready to cache:
        // -----------------------------------------------------
        // Update the content_length based on what's available:
        content_length = tikainput.available();

        // Record the length:
        solr.setField(SolrFields.CONTENT_LENGTH, "" + content_length);

        // Create an appropriately cached version of the payload, to allow analysis.
        final long hashStreamStart = System.nanoTime();
        HashedCachedInputStream hcis = new HashedCachedInputStream(header, tikainput, content_length);
        tikainput = hcis.getInputStream();
        String hash = hcis.getHash();
        Instrument.timeRel("WARCIndexer.extract#total", "WARCIndexer.extract#hashstreamwrap", hashStreamStart);

        // Use an ID that ensures every URL+timestamp gets a separate
        // record:
        String id = waybackDate + "/" + url_md5hex;

        // Set these last:
        solr.setField(SolrFields.ID, id);
        solr.setField(SolrFields.HASH, hash);

        // -----------------------------------------------------
        // Apply any annotations:
        // -----------------------------------------------------
        if (ant != null) {
            try {
                ant.applyAnnotations(saneURI, solr.getSolrDocument());
            } catch (URISyntaxException e) {
                e.printStackTrace();
                log.error("Failed to annotate " + saneURI + " : " + e);
            }
        }

        // -----------------------------------------------------
        // WARC revisit record handling:
        // -----------------------------------------------------
        // If this is a revisit record, we should just return an update to the crawl_dates (when using hashUrlId)
        if (WARCConstants.WARCRecordType.revisit.name()
                .equalsIgnoreCase((String) header.getHeaderValue(HEADER_KEY_TYPE))) {
            solr.removeField(SolrFields.CONTENT_LENGTH); // It is 0 and would mess with statistics
            // Copy content_type_served to content_type (no tika/droid for revisits)
            solr.addField(SolrFields.SOLR_CONTENT_TYPE,
                    (String) solr.getFieldValue(SolrFields.CONTENT_TYPE_SERVED));
            return solr;
        }

        // -----------------------------------------------------
        // Payload duplication has been checked, ready to parse:
        // -----------------------------------------------------
        final long analyzeStart = System.nanoTime();

        // Mark the start of the payload, with a readLimit corresponding to
        // the payload size:
        tikainput.mark((int) content_length);

        // Pass on to other extractors as required, resetting the stream before each:
        this.wpa.analyse(archiveName, header, tikainput, solr, content_length);
        Instrument.timeRel("WARCIndexer.extract#total", "WARCIndexer.extract#analyzetikainput", analyzeStart);

        // Clear up the caching of the payload:
        hcis.cleanup();

        // -----------------------------------------------------
        // Payload analysis complete, now performing text analysis:
        // -----------------------------------------------------
        this.txa.analyse(solr);

        // Remove the Text Field if required
        if (!isTextIncluded) {
            solr.removeField(SolrFields.SOLR_EXTRACTED_TEXT);
        } else {
            // Otherwise, decide whether to store or both store and index
            // the text:
            if (storeText == false) {
                // Copy the text into the indexed (but not stored) field:
                solr.setField(SolrFields.SOLR_EXTRACTED_TEXT_NOT_STORED,
                        (String) solr.getField(SolrFields.SOLR_EXTRACTED_TEXT).getFirstValue());
                // Take the text out of the original (stored) field.
                solr.removeField(SolrFields.SOLR_EXTRACTED_TEXT);
            }
        }
    }
    Instrument.timeRel("WARCIndexerCommand.parseWarcFiles#solrdocCreation", "WARCIndexer.extract#total", start);
    String servedType = "" + solr.getField(SolrFields.CONTENT_TYPE_SERVED);
    Instrument.timeRel("WARCIndexer#content_types",
            "WARCIndexer#" + (servedType.contains(";") ? servedType.split(";")[0] : servedType), start);
    Instrument.timeRel("WARCIndexer#content_types", start);
    return solr;
}