List of usage examples for java.net.SocketTimeoutException.getMessage()

public String getMessage()
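SocketTimeoutException extends InterruptedIOException and is raised when a blocking socket operation (connect, read, accept) exceeds its configured timeout; getMessage() is inherited from Throwable and returns whatever detail string the thrower supplied. Before the full examples below, here is a minimal, self-contained sketch of the basic pattern they all share, assuming example.com is reachable from your network; the host, port, and timeout values are illustrative:

import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class TimeoutDemo {
    public static void main(String[] args) throws Exception {
        try (Socket socket = new Socket()) {
            // connect() and read() each honor their own timeout
            socket.connect(new InetSocketAddress("example.com", 80), 3000); // connect timeout: 3 s
            socket.setSoTimeout(2000); // read timeout (SO_TIMEOUT): 2 s
            InputStream in = socket.getInputStream();
            int b = in.read(); // blocks until data arrives, EOF, or SO_TIMEOUT expires
            System.out.println("first byte: " + b);
        } catch (SocketTimeoutException e) {
            // Typical detail strings: "connect timed out", "Read timed out"
            System.err.println("timed out: " + e.getMessage());
        }
    }
}

Since no HTTP request is ever written, the read() call above will normally end in the catch block, printing the JDK's "Read timed out" message.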
From source file: org.infoscoop.web.MultiRssServlet.java

private void mergeRssAnd2JSON(HttpServletRequest request, HttpServletResponse response,
        String uid, String widgetId, int pageSize, NodeList urlList) throws Exception {
    try {
        RssJsonResultBuilder resultBuilder = new SortedRssJsonResultBuilder(pageSize);

        String dateTimeFormat = request.getHeader("X-IS-DATETIMEFORMAT");
        if (dateTimeFormat != null) {
            dateTimeFormat = URLDecoder.decode(dateTimeFormat, "UTF-8");
        }
        String freshTime = request.getHeader("X-IS-FRESHTIME");
        String maxCountString = request.getHeader("X-IS-RSSMAXCOUNT");
        int maxCount = 1000;
        if (maxCountString != null) {
            try {
                int paramMaxCount = Integer.parseInt(maxCountString);
                if (paramMaxCount >= 0) {
                    maxCount = paramMaxCount;
                }
            } catch (NumberFormatException e) {
                log.warn("rssmaxcount \"" + maxCountString + "\" isn't an integer value.");
            }
        }

        // Narrowing
        String titleFilter = request.getHeader("X-IS-TITLEFILTER");
        if (titleFilter != null)
            titleFilter = URLDecoder.decode(titleFilter, "UTF-8");
        String creatorFilter = request.getHeader("X-IS-CREATORFILTER");
        if (creatorFilter != null)
            creatorFilter = URLDecoder.decode(creatorFilter, "UTF-8");
        String categoryFilter = request.getHeader("X-IS-CATEGORYFILTER");
        if (categoryFilter != null)
            categoryFilter = URLDecoder.decode(categoryFilter, "UTF-8");

        int DEFAULT_TIMEOUT = 15 * 1000;
        boolean modified = false;
        Map cacheHeaders = new HashMap();
        Map errorMap = new HashMap();
        List siteCacheHeaders = new ArrayList();

        for (int i = 0; i < urlList.getLength(); i++) {
            Element rssEl = (Element) urlList.item(i);
            String url = rssEl.getAttribute("url");

            ProxyRequest proxyRequest = new ProxyRequest(url, "NoOperation");
            proxyRequest.setLocales(request.getLocales());
            proxyRequest.setPortalUid(uid);

            int timeout = request.getIntHeader("MSDPortal-Timeout") - 1000;
            proxyRequest.setTimeout((timeout > 0) ? timeout : DEFAULT_TIMEOUT);
            //proxyRequest.setTimeout(timeout);

            proxyRequest.addIgnoreHeader("user-agent");
            proxyRequest.addIgnoreHeader("X-IS-DATETIMEFORMAT");
            proxyRequest.addIgnoreHeader("X-IS-FRESHTIME");
            proxyRequest.addIgnoreHeader("X-IS-REFRESH");
            proxyRequest.addIgnoreHeader("X-IS-RSSMAXCOUNT");
            proxyRequest.addIgnoreHeader("X-IS-PAGESIZE");

            Enumeration headers = request.getHeaderNames();
            while (headers.hasMoreElements()) {
                String headerName = (String) headers.nextElement();
                proxyRequest.putRequestHeader(headerName, request.getHeader(headerName));
            }

            NodeList rssChildNodes = rssEl.getElementsByTagName("header");
            for (int j = 0; j < rssChildNodes.getLength(); j++) {
                Element header = (Element) rssChildNodes.item(j);
                if (header.getFirstChild() != null) {
                    String name = header.getAttribute("name");
                    String value = header.getFirstChild().getNodeValue();
                    if (name == null || name.trim().length() == 0
                            || value == null || value.trim().length() == 0)
                        continue;
                    proxyRequest.putRequestHeader(name, value);
                }
            }

            int statusCode = 0;
            String methodType = rssEl.getAttribute("method");
            try {
                if ("post".equals(methodType)) {
                    statusCode = proxyRequest.executePost();
                } else {
                    statusCode = proxyRequest.executeGet();
                }
            } catch (SocketTimeoutException ex) {
                log.error("url: [" + url + "] socket timeout.", ex);
                errorMap.put(url, new Integer(HttpStatusCode.MSD_SC_TIMEOUT));
            } catch (ConnectTimeoutException ex) {
                log.error("url: [" + url + "] connection timeout.", ex);
                errorMap.put(url, new Integer(500));
            } catch (SocketException ex) {
                log.error("url: [" + url + "] socket error.", ex);
                errorMap.put(url, new Integer(HttpStatus.SC_NOT_FOUND));
            } catch (IOException ex) {
                log.error("url: [" + url + "] I/O error.", ex);
                errorMap.put(url, new Integer(HttpStatus.SC_NOT_FOUND));
            } catch (Exception ex) {
                log.error("url: [" + url + "]" + ex.getMessage(), ex);
                errorMap.put(url, new Integer(500));
            }

            BufferedInputStream bis = null;
            if (errorMap.containsKey(url)) {
                // nothing
            } else if (statusCode == 204) {
                log.warn("url:[" + url + "] is no content #" + statusCode);
                modified = true;
            } else if (statusCode == 304) {
                log.warn("url:[" + url + "] is not modified #" + statusCode);
            } else if (statusCode != 200) {
                log.error("url:[" + url + "] had error status code #" + statusCode);
                errorMap.put(url, new Integer(statusCode));
            } else {
                log.info("url:[" + url + "] succeeded #" + statusCode);
                try {
                    modified = true;
                    bis = new BufferedInputStream(proxyRequest.getResponseBody());
                    ByteArrayOutputStream baos = new ByteArrayOutputStream();
                    byte[] buf = new byte[10240];
                    int c;
                    while ((c = bis.read(buf)) != -1) {
                        baos.write(buf, 0, c);
                        baos.flush();
                    }
                    bis.close();
                    byte[] data = baos.toByteArray();
                    baos.close();
                    //RssCacheDAO.newInstance().insertCache(uid, widgetId + url, -1, data);

                    Map responseHeaders = proxyRequest.getResponseHeaders();
                    String lastModifiedName = "Last-Modified".toLowerCase();
                    if (responseHeaders.containsKey(lastModifiedName)) {
                        siteCacheHeaders.add(lastModifiedName);
                        siteCacheHeaders.add(responseHeaders.get(lastModifiedName));
                    }
                    String etagName = "ETag".toLowerCase();
                    if (responseHeaders.containsKey(etagName)) {
                        siteCacheHeaders.add(etagName);
                        siteCacheHeaders.add(responseHeaders.get(etagName));
                    }
                    if (siteCacheHeaders.size() > 0) {
                        cacheHeaders.put(url, siteCacheHeaders.toArray());
                        siteCacheHeaders.clear();
                    }
                    bis = new BufferedInputStream(new ByteArrayInputStream(data));
                } catch (IOException ex) {
                    log.error("rss reading " + url + " failed.", ex);
                    cacheHeaders.remove(url);
                    errorMap.put(url, new Integer(500));
                    bis.close();
                    bis = null;
                }
            }

            if (bis == null)
                continue;

            RssHandler handler;
            boolean isAtom = RssFilter.isAtom(bis);
            XMLFilter.skipEmptyLine(bis);
            if (isAtom) {
                handler = new AtomHandler(resultBuilder, dateTimeFormat, freshTime, maxCount,
                        titleFilter, creatorFilter, categoryFilter, i);
            } else {
                handler = new RssHandler(resultBuilder, dateTimeFormat, freshTime, maxCount,
                        titleFilter, creatorFilter, categoryFilter, i);
            }
            try {
                XMLReader reader = factory.newSAXParser().getXMLReader();
                reader.setEntityResolver(NoOpEntityResolver.getInstance());
                reader.setContentHandler(handler);
                reader.parse(new InputSource(bis));
            } catch (SAXException e) {
                log.info("Parsing rss " + url + " failed.", e);
                cacheHeaders.remove(url);
                errorMap.put(url, new Integer(HttpStatusCode.MSD_SC_CONTENT_PARSE_ERROR));
            }
        }

        if (!modified && errorMap.isEmpty()) {
            log.warn("multi rss is not modified.");
            response.setStatus(304);
            return;
        } else {
            try {
                long freshTimeLong = new Date().getTime();
                if (freshTime != null)
                    freshTimeLong = Long.parseLong(freshTime.trim());
                setOldData(resultBuilder, uid, widgetId, freshTimeLong,
                        titleFilter, creatorFilter, categoryFilter);
            } catch (NumberFormatException e) {
                log.error("", e);
            }
            //}
            int pageCount = resultBuilder.getPageCount();
            // We create the result cache by all means.
            //if (pageCount > 1) {
            for (int pageNum = 0; pageNum < pageCount; pageNum++) {
                RssCacheDAO.newInstance().insertCache(uid, widgetId, pageNum,
                        resultBuilder.getResult(pageNum));
            }
            //}
        }

        response.addHeader("Content-Type", "text/plain; charset=UTF-8");
        String result = resultBuilder.getResult();
        if (!errorMap.isEmpty()) {
            JSONObject errors = new JSONObject(errorMap);
            result = "{errors:" + errors.toString() + ","
                    + result.substring(result.indexOf("{") + 1);
        }
        if (!cacheHeaders.isEmpty()) {
            StringBuffer cacheHeadersBuf = new StringBuffer();
            cacheHeadersBuf.append("cacheHeaders : {");
            for (Iterator keys = cacheHeaders.keySet().iterator(); keys.hasNext();) {
                String url = (String) keys.next();
                Object[] headers = (Object[]) cacheHeaders.get(url);
                cacheHeadersBuf.append("\"").append(url).append("\" : {");
                for (int i = 0; i < headers.length; i += 2) {
                    cacheHeadersBuf.append("\"").append(headers[i]).append("\"");
                    cacheHeadersBuf.append(" : '").append(headers[i + 1]).append("'");
                    if (i + 2 < headers.length)
                        cacheHeadersBuf.append(",");
                }
                cacheHeadersBuf.append("}");
                if (keys.hasNext())
                    cacheHeadersBuf.append(",");
            }
            cacheHeadersBuf.append("}");
            result = "{" + cacheHeadersBuf.toString() + ","
                    + result.substring(result.indexOf("{") + 1);
        }

        response.setContentLength(result.getBytes("UTF-8").length);
        OutputStreamWriter out = new OutputStreamWriter(response.getOutputStream(), "UTF-8");
        try {
            out.write(result);
            out.flush();
        } catch (SocketException ex) {
            // ignore client abort exception
        } finally {
            if (out != null) {
                try {
                    out.close();
                } catch (IOException ex) {
                    // ignore
                }
            }
        }
    } catch (Exception e) {
        log.error("unexpected error occurred.", e);
        response.sendError(500, e.getMessage());
    }
}
From source file: org.apache.hadoop.hbase.client.TestHCM.java

/**
 * Test that an operation can fail if we reach the global operation timeout, even if the
 * individual timeout is fine. We do that with:
 *  - client side: an operation timeout of 30 seconds
 *  - server side: we sleep 20 seconds at each attempt. The first attempt fails, the second one
 *    succeeds. But the client won't wait that long, because 20 + 20 > 30, so the client
 *    has already timed out by the time the server answers.
 */
@Test
public void testOperationTimeout() throws Exception {
    HTableDescriptor hdt = TEST_UTIL.createTableDescriptor("HCM-testOperationTimeout");
    hdt.addCoprocessor(SleepAndFailFirstTime.class.getName());
    HTable table = TEST_UTIL.createTable(hdt, new byte[][] { FAM_NAM }, TEST_UTIL.getConfiguration());

    // Check that it works if the timeout is big enough
    table.setOperationTimeout(120 * 1000);
    table.get(new Get(FAM_NAM));

    // Resetting and retrying. Will fail this time, not enough time for the second try
    SleepAndFailFirstTime.ct.set(0);
    try {
        table.setOperationTimeout(30 * 1000);
        table.get(new Get(FAM_NAM));
        Assert.fail("We expect an exception here");
    } catch (SocketTimeoutException e) {
        // The client has a CallTimeout class, but it's not shared. We're not very clean today;
        // in the general case you can expect the call to stop, but the exception may vary.
        // In this test, however, we're sure that it will be a socket timeout.
        LOG.info("We received an exception, as expected ", e);
    } catch (IOException e) {
        Assert.fail("Wrong exception: " + e.getMessage());
    } finally {
        table.close();
    }
}
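The assertion pattern above depends on HBase test utilities; the same expect-a-timeout structure can be reproduced with plain JDK sockets, since a ServerSocket's backlog completes the TCP handshake even when nothing is ever written back. A minimal sketch (class name and timeout values are illustrative):

import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class ReadTimeoutTest {
    public static void main(String[] args) throws Exception {
        // A server that accepts connections but never sends anything
        try (ServerSocket server = new ServerSocket(0);
                Socket client = new Socket()) {
            client.connect(new InetSocketAddress("localhost", server.getLocalPort()), 1000);
            client.setSoTimeout(500); // the read below must fail within ~500 ms
            InputStream in = client.getInputStream();
            try {
                in.read();
                throw new AssertionError("We expect a timeout here");
            } catch (SocketTimeoutException e) {
                System.out.println("received expected timeout: " + e.getMessage());
            }
        }
    }
}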
From source file: fr.bmartel.speedtest.SpeedTestTask.java

/**
 * Start FTP upload.
 *
 * @param hostname      ftp host
 * @param port          ftp port
 * @param uri           upload uri
 * @param fileSizeOctet file size in octets
 * @param user          username
 * @param password      password
 */
public void startFtpUpload(final String hostname, final int port, final String uri,
        final int fileSizeOctet, final String user, final String password) {

    mSpeedTestMode = SpeedTestMode.UPLOAD;
    mUploadFileSize = new BigDecimal(fileSizeOctet);
    mForceCloseSocket = false;
    mErrorDispatched = false;

    if (mWriteExecutorService == null || mWriteExecutorService.isShutdown()) {
        mWriteExecutorService = Executors.newSingleThreadExecutor();
    }

    mWriteExecutorService.execute(new Runnable() {
        @Override
        public void run() {
            final FTPClient ftpClient = new FTPClient();
            final RandomGen randomGen = new RandomGen();
            RandomAccessFile uploadFile = null;

            try {
                ftpClient.connect(hostname, port);
                ftpClient.login(user, password);
                ftpClient.enterLocalPassiveMode();
                ftpClient.setFileType(FTP.BINARY_FILE_TYPE);

                byte[] fileContent = new byte[] {};

                if (mSocketInterface.getUploadStorageType() == UploadStorageType.RAM_STORAGE) {
                    /* generate a file with a size of fileSizeOctet octets */
                    fileContent = randomGen.generateRandomArray(fileSizeOctet);
                } else {
                    uploadFile = randomGen.generateRandomFile(fileSizeOctet);
                    uploadFile.seek(0);
                }

                mFtpOutputstream = ftpClient.storeFileStream(uri);

                if (mFtpOutputstream != null) {
                    mUploadTempFileSize = 0;

                    final int uploadChunkSize = mSocketInterface.getUploadChunkSize();
                    final int step = fileSizeOctet / uploadChunkSize;
                    final int remain = fileSizeOctet % uploadChunkSize;

                    mTimeStart = System.currentTimeMillis();
                    mTimeEnd = 0;

                    if (mRepeatWrapper.isFirstUpload()) {
                        mRepeatWrapper.setFirstUploadRepeat(false);
                        mRepeatWrapper.setStartDate(mTimeStart);
                    }
                    if (mRepeatWrapper.isRepeatUpload()) {
                        mRepeatWrapper.updatePacketSize(mUploadFileSize);
                    }

                    if (mForceCloseSocket) {
                        SpeedTestUtils.dispatchError(mForceCloseSocket, mListenerList, false, "");
                    } else {
                        for (int i = 0; i < step; i++) {
                            final byte[] chunk = SpeedTestUtils.readUploadData(
                                    mSocketInterface.getUploadStorageType(), fileContent,
                                    uploadFile, mUploadTempFileSize, uploadChunkSize);
                            mFtpOutputstream.write(chunk, 0, uploadChunkSize);
                            mUploadTempFileSize += uploadChunkSize;
                            if (mRepeatWrapper.isRepeatUpload()) {
                                mRepeatWrapper.updateTempPacketSize(uploadChunkSize);
                            }
                            if (!mReportInterval) {
                                final SpeedTestReport report = mSocketInterface.getLiveUploadReport();
                                for (int j = 0; j < mListenerList.size(); j++) {
                                    mListenerList.get(j).onUploadProgress(report.getProgressPercent(), report);
                                }
                            }
                        }
                        if (remain != 0) {
                            final byte[] chunk = SpeedTestUtils.readUploadData(
                                    mSocketInterface.getUploadStorageType(), fileContent,
                                    uploadFile, mUploadTempFileSize, remain);
                            mFtpOutputstream.write(chunk, 0, remain);
                            mUploadTempFileSize += remain;
                            if (mRepeatWrapper.isRepeatUpload()) {
                                mRepeatWrapper.updateTempPacketSize(remain);
                            }
                        }
                        if (!mReportInterval) {
                            final SpeedTestReport report = mSocketInterface.getLiveUploadReport();
                            for (int j = 0; j < mListenerList.size(); j++) {
                                mListenerList.get(j).onUploadProgress(SpeedTestConst.PERCENT_MAX.floatValue(), report);
                            }
                        }
                        mTimeEnd = System.currentTimeMillis();
                    }

                    mFtpOutputstream.close();
                    mReportInterval = false;

                    final SpeedTestReport report = mSocketInterface.getLiveUploadReport();
                    for (int i = 0; i < mListenerList.size(); i++) {
                        mListenerList.get(i).onUploadFinished(report);
                    }
                    if (!mRepeatWrapper.isRepeatUpload()) {
                        closeExecutors();
                    }
                } else {
                    mReportInterval = false;
                    SpeedTestUtils.dispatchError(mForceCloseSocket, mListenerList, false,
                            "can't create stream from uri " + uri + " with reply code: "
                                    + ftpClient.getReplyCode());
                }
            } catch (SocketTimeoutException e) {
                //e.printStackTrace();
                mReportInterval = false;
                mErrorDispatched = true;
                if (!mForceCloseSocket) {
                    SpeedTestUtils.dispatchSocketTimeout(mForceCloseSocket, mListenerList, false,
                            SpeedTestConst.SOCKET_WRITE_ERROR);
                } else {
                    SpeedTestUtils.dispatchError(mForceCloseSocket, mListenerList, false, e.getMessage());
                }
                closeSocket();
                closeExecutors();
            } catch (IOException e) {
                //e.printStackTrace();
                mReportInterval = false;
                mErrorDispatched = true;
                SpeedTestUtils.dispatchError(mForceCloseSocket, mListenerList, false, e.getMessage());
                closeExecutors();
            } finally {
                mErrorDispatched = false;
                mSpeedTestMode = SpeedTestMode.NONE;
                disconnectFtp(ftpClient);
                if (uploadFile != null) {
                    try {
                        uploadFile.close();
                        randomGen.deleteFile();
                    } catch (IOException e) {
                        //e.printStackTrace();
                    }
                }
            }
        }
    });
}
From source file: fr.bmartel.speedtest.SpeedTestTask.java

/**
 * Write upload POST request with a randomly generated file.
 */
public void writeUpload(final String hostname, final int port, final String uri, final int fileSizeOctet) {

    mSpeedTestMode = SpeedTestMode.UPLOAD;

    this.mHostname = hostname;
    this.mPort = port;
    mUploadFileSize = new BigDecimal(fileSizeOctet);
    mForceCloseSocket = false;
    mErrorDispatched = false;
    mUploadTempFileSize = 0;
    mTimeStart = System.currentTimeMillis();

    connectAndExecuteTask(new Runnable() {
        @Override
        public void run() {
            if (mSocket != null && !mSocket.isClosed()) {
                RandomAccessFile uploadFile = null;
                final RandomGen randomGen = new RandomGen();

                try {
                    byte[] body = new byte[] {};

                    if (mSocketInterface.getUploadStorageType() == UploadStorageType.RAM_STORAGE) {
                        /* generate a file with a size of fileSizeOctet octets */
                        body = randomGen.generateRandomArray(fileSizeOctet);
                    } else {
                        uploadFile = randomGen.generateRandomFile(fileSizeOctet);
                        uploadFile.seek(0);
                    }

                    final String head = "POST " + uri + " HTTP/1.1\r\n"
                            + "Host: " + hostname
                            + "\r\nAccept: */*\r\nContent-Length: " + fileSizeOctet + "\r\n\r\n";

                    mUploadTempFileSize = 0;

                    final int uploadChunkSize = mSocketInterface.getUploadChunkSize();
                    final int step = fileSizeOctet / uploadChunkSize;
                    final int remain = fileSizeOctet % uploadChunkSize;

                    if (mSocket.getOutputStream() != null) {
                        if (writeFlushSocket(head.getBytes()) != 0) {
                            throw new SocketTimeoutException();
                        }

                        mTimeStart = System.currentTimeMillis();
                        mTimeEnd = 0;

                        if (mRepeatWrapper.isFirstUpload()) {
                            mRepeatWrapper.setFirstUploadRepeat(false);
                            mRepeatWrapper.setStartDate(mTimeStart);
                        }
                        if (mRepeatWrapper.isRepeatUpload()) {
                            mRepeatWrapper.updatePacketSize(mUploadFileSize);
                        }

                        for (int i = 0; i < step; i++) {
                            final byte[] chunk = SpeedTestUtils.readUploadData(
                                    mSocketInterface.getUploadStorageType(), body, uploadFile,
                                    mUploadTempFileSize, uploadChunkSize);
                            if (writeFlushSocket(chunk) != 0) {
                                throw new SocketTimeoutException();
                            }
                            mUploadTempFileSize += uploadChunkSize;
                            if (mRepeatWrapper.isRepeatUpload()) {
                                mRepeatWrapper.updateTempPacketSize(uploadChunkSize);
                            }
                            if (!mReportInterval) {
                                final SpeedTestReport report = mSocketInterface.getLiveUploadReport();
                                for (int j = 0; j < mListenerList.size(); j++) {
                                    mListenerList.get(j).onUploadProgress(report.getProgressPercent(), report);
                                }
                            }
                        }

                        final byte[] chunk = SpeedTestUtils.readUploadData(
                                mSocketInterface.getUploadStorageType(), body, uploadFile,
                                mUploadTempFileSize, remain);

                        if (remain != 0 && writeFlushSocket(chunk) != 0) {
                            throw new SocketTimeoutException();
                        } else {
                            mUploadTempFileSize += remain;
                            if (mRepeatWrapper.isRepeatUpload()) {
                                mRepeatWrapper.updateTempPacketSize(remain);
                            }
                        }

                        if (!mReportInterval) {
                            final SpeedTestReport report = mSocketInterface.getLiveUploadReport();
                            for (int j = 0; j < mListenerList.size(); j++) {
                                mListenerList.get(j).onUploadProgress(SpeedTestConst.PERCENT_MAX.floatValue(), report);
                            }
                        }
                    }
                } catch (SocketTimeoutException e) {
                    mReportInterval = false;
                    mErrorDispatched = true;
                    closeSocket();
                    closeExecutors();
                    if (!mForceCloseSocket) {
                        SpeedTestUtils.dispatchSocketTimeout(mForceCloseSocket, mListenerList, false,
                                SpeedTestConst.SOCKET_WRITE_ERROR);
                    } else {
                        SpeedTestUtils.dispatchError(mForceCloseSocket, mListenerList, false, e.getMessage());
                    }
                } catch (IOException e) {
                    mReportInterval = false;
                    mErrorDispatched = true;
                    closeExecutors();
                    SpeedTestUtils.dispatchError(mForceCloseSocket, mListenerList, false, e.getMessage());
                } finally {
                    if (uploadFile != null) {
                        try {
                            uploadFile.close();
                            randomGen.deleteFile();
                        } catch (IOException e) {
                            //e.printStackTrace();
                        }
                    }
                }
            }
        }
    }, false);
}
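Note that both SpeedTestTask methods signal a failed flush by throwing a bare new SocketTimeoutException(), whose getMessage() is null; that is why the normal dispatch path uses the fixed SpeedTestConst.SOCKET_WRITE_ERROR string and only the forced-close path reports e.getMessage(). A small sketch of that null-message pitfall, with a stand-in for the constant:

import java.net.SocketTimeoutException;

public class NullMessageDemo {
    private static final String SOCKET_WRITE_ERROR = "socket write error"; // stand-in for SpeedTestConst
    public static void main(String[] args) {
        try {
            throw new SocketTimeoutException(); // no detail string, as in writeUpload()
        } catch (SocketTimeoutException e) {
            // e.getMessage() is null here; prefer a fixed fallback for user-facing errors
            String msg = (e.getMessage() != null) ? e.getMessage() : SOCKET_WRITE_ERROR;
            System.err.println(msg); // prints "socket write error"
        }
    }
}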
From source file: org.archive.io.RecordingInputStream.java

public void readToEndOfContent(long contentLength) throws IOException, InterruptedException {
    // Check we're open before proceeding.
    if (!isOpen()) {
        // TODO: should this be a noisier exception-raising error?
        return;
    }

    long totalBytes = recordingOutputStream.position - recordingOutputStream.getMessageBodyBegin();
    long bytesRead = -1L;
    long maxToRead = -1;
    while (contentLength <= 0 || totalBytes < contentLength) {
        try {
            // read no more than soft max
            maxToRead = (contentLength <= 0) ? drainBuffer.length
                    : Math.min(drainBuffer.length, contentLength - totalBytes);
            // nor more than hard max
            maxToRead = Math.min(maxToRead, recordingOutputStream.getRemainingLength());
            // but always at least 1 (to trigger hard max exception) XXX wtf is this?
            maxToRead = Math.max(maxToRead, 1);

            bytesRead = read(drainBuffer, 0, (int) maxToRead);
            if (bytesRead == -1) {
                break;
            }
            totalBytes += bytesRead;

            if (Thread.interrupted()) {
                throw new InterruptedException("Interrupted during IO");
            }
        } catch (SocketTimeoutException e) {
            // A socket timeout is just a transient problem, meaning
            // nothing was available in the configured timeout period,
            // but something else might become available later.
            // Take this opportunity to check the overall
            // timeout (below). One reason for this timeout is
            // servers that keep up the connection, 'keep-alive', even
            // though we asked them to not keep the connection open.
            if (logger.isLoggable(Level.FINE)) {
                logger.log(Level.FINE, "socket timeout", e);
            }
            // check for interrupt
            if (Thread.interrupted()) {
                throw new InterruptedException("Interrupted during IO");
            }
            // check for overall timeout
            recordingOutputStream.checkLimits();
        } catch (SocketException se) {
            throw se;
        } catch (NullPointerException e) {
            // [ 896757 ] NPEs in Andy's Th-Fri Crawl.
            // A crawl was showing NPEs in this part of the code, but we cannot
            // reproduce it. Adding this rethrowing catch block w/
            // diagnostics to help should we come across the problem in the
            // future.
            throw new NullPointerException("Stream " + this.in + ", " + e.getMessage()
                    + " " + Thread.currentThread().getName());
        }
    }
}
From source file: org.archive.io.RecordingInputStream.java

/**
 * Read all of a stream (or read until we time out or have read to the max).
 *
 * @param softMaxLength Maximum length to read; if zero or negative, then no
 *        limit. If met, return normally.
 * @throws IOException failed read.
 * @throws RecorderLengthExceededException
 * @throws RecorderTimeoutException
 * @throws InterruptedException
 * @deprecated
 */
public void readFullyOrUntil(long softMaxLength) throws IOException,
        RecorderLengthExceededException, RecorderTimeoutException, InterruptedException {
    // Check we're open before proceeding.
    if (!isOpen()) {
        // TODO: should this be a noisier exception-raising error?
        return;
    }

    long totalBytes = 0L;
    long bytesRead = -1L;
    long maxToRead = -1;
    while (true) {
        try {
            // read no more than soft max
            maxToRead = (softMaxLength <= 0) ? drainBuffer.length
                    : Math.min(drainBuffer.length, softMaxLength - totalBytes);
            // nor more than hard max
            maxToRead = Math.min(maxToRead, recordingOutputStream.getRemainingLength());
            // but always at least 1 (to trigger hard max exception)
            maxToRead = Math.max(maxToRead, 1);

            bytesRead = read(drainBuffer, 0, (int) maxToRead);
            if (bytesRead == -1) {
                break;
            }
            totalBytes += bytesRead;

            if (Thread.interrupted()) {
                throw new InterruptedException("Interrupted during IO");
            }
        } catch (SocketTimeoutException e) {
            // A socket timeout is just a transient problem, meaning
            // nothing was available in the configured timeout period,
            // but something else might become available later.
            // Take this opportunity to check the overall
            // timeout (below). One reason for this timeout is
            // servers that keep up the connection, 'keep-alive', even
            // though we asked them to not keep the connection open.
            if (logger.isLoggable(Level.FINE)) {
                logger.log(Level.FINE, "socket timeout", e);
            }
            // check for interrupt
            if (Thread.interrupted()) {
                throw new InterruptedException("Interrupted during IO");
            }
            // check for overall timeout
            recordingOutputStream.checkLimits();
        } catch (SocketException se) {
            throw se;
        } catch (NullPointerException e) {
            // [ 896757 ] NPEs in Andy's Th-Fri Crawl.
            // A crawl was showing NPEs in this part of the code, but we cannot
            // reproduce it. Adding this rethrowing catch block w/
            // diagnostics to help should we come across the problem in the
            // future.
            throw new NullPointerException("Stream " + this.in + ", " + e.getMessage()
                    + " " + Thread.currentThread().getName());
        }

        // if we have read 'enough', just finish
        if (softMaxLength > 0 && totalBytes >= softMaxLength) {
            break; // return
        }
    }
}
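Both RecordingInputStream methods rely on the same idea: with a short SO_TIMEOUT configured, read() returns control periodically, each SocketTimeoutException is treated as a transient tick, and that tick is used to enforce an overall limit. A distilled sketch of the loop, assuming plain JDK sockets rather than Heritrix's recording streams; the one-second SO_TIMEOUT and deadline parameter are illustrative:

import java.io.IOException;
import java.io.InputStream;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class DeadlineRead {
    /** Drain a stream, waking at least every SO_TIMEOUT to enforce an overall deadline. */
    static long drainWithDeadline(Socket socket, long deadlineMillis) throws IOException {
        socket.setSoTimeout(1000); // read() returns to us at least once per second
        long deadline = System.currentTimeMillis() + deadlineMillis;
        byte[] buf = new byte[8192];
        long total = 0;
        InputStream in = socket.getInputStream();
        while (true) {
            try {
                int n = in.read(buf);
                if (n == -1) break; // end of stream
                total += n;
            } catch (SocketTimeoutException e) {
                // Transient: nothing arrived within SO_TIMEOUT. Check the hard deadline instead.
                if (System.currentTimeMillis() > deadline) {
                    throw new IOException("overall timeout exceeded: " + e.getMessage(), e);
                }
            }
        }
        return total;
    }
}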
From source file: com.twinsoft.convertigo.beans.connectors.HttpConnector.java

protected int doExecuteMethod(final HttpMethod method, Context context)
        throws ConnectionException, URIException, MalformedURLException {
    int statuscode = -1;

    // Tells the method to automatically handle authentication.
    method.setDoAuthentication(true);
    // Tells the method to automatically handle redirection.
    method.setFollowRedirects(false);

    HttpPool httpPool = ((AbstractHttpTransaction) context.transaction).getHttpPool();
    HttpClient httpClient = context.getHttpClient3(httpPool);

    try {
        // Display the cookies
        if (handleCookie) {
            Cookie[] cookies = httpState.getCookies();
            if (Engine.logBeans.isTraceEnabled())
                Engine.logBeans.trace("(HttpConnector) HttpClient request cookies:"
                        + Arrays.asList(cookies).toString());
        }

        forwardHeader(new HeaderForwarder() {
            public void add(String name, String value, String forwardPolicy) {
                if (HttpConnector.HTTP_HEADER_FORWARD_POLICY_IGNORE.equals(forwardPolicy)) {
                    Header exHeader = method.getRequestHeader(name);
                    if (exHeader != null) {
                        // Nothing to do
                        Engine.logEngine.debug("(WebViewer) Forwarding header '" + name
                                + "' has been ignored due to forward policy");
                    } else {
                        method.setRequestHeader(name, value);
                        Engine.logEngine.debug("(WebViewer) Header forwarded and added: " + name + "=" + value);
                    }
                } else if (HttpConnector.HTTP_HEADER_FORWARD_POLICY_REPLACE.equals(forwardPolicy)) {
                    method.setRequestHeader(name, value);
                    Engine.logEngine.debug("(WebViewer) Header forwarded and replaced: " + name + "=" + value);
                } else if (HttpConnector.HTTP_HEADER_FORWARD_POLICY_MERGE.equals(forwardPolicy)) {
                    Header exHeader = method.getRequestHeader(name);
                    if (exHeader != null)
                        value = exHeader.getValue() + ", " + value;
                    method.setRequestHeader(name, value);
                    Engine.logEngine.debug("(WebViewer) Header forwarded and merged: " + name + "=" + value);
                }
            }
        });

        // Handle OAuth signatures if any
        if (oAuthKey != null && oAuthSecret != null && oAuthToken != null && oAuthTokenSecret != null) {
            oAuthConsumer = new HttpOAuthConsumer(oAuthKey, oAuthSecret, hostConfiguration);
            oAuthConsumer.setTokenWithSecret(oAuthToken, oAuthTokenSecret);
            oAuthConsumer.sign(method);
            oAuthConsumer = null;
        }

        HttpUtils.logCurrentHttpConnection(httpClient, hostConfiguration, httpPool);

        hostConfiguration.getParams().setIntParameter(HttpConnectionParams.SO_TIMEOUT,
                (int) context.requestedObject.getResponseTimeout() * 1000);
        hostConfiguration.getParams().setIntParameter(HttpConnectionParams.CONNECTION_TIMEOUT,
                (int) context.requestedObject.getResponseTimeout() * 1000);

        Engine.logBeans.debug("(HttpConnector) HttpClient: executing method...");
        statuscode = httpClient.executeMethod(hostConfiguration, method, httpState);
        Engine.logBeans.debug("(HttpConnector) HttpClient: end of method successful");

        // Display the cookies
        if (handleCookie) {
            Cookie[] cookies = httpState.getCookies();
            if (Engine.logBeans.isTraceEnabled())
                Engine.logBeans.trace("(HttpConnector) HttpClient response cookies:"
                        + Arrays.asList(cookies).toString());
        }
    } catch (SocketTimeoutException e) {
        throw new ConnectionException("Timeout reached ("
                + context.requestedObject.getResponseTimeout() + " sec)");
    } catch (IOException e) {
        if (!context.requestedObject.runningThread.bContinue)
            return statuscode;
        try {
            HttpUtils.logCurrentHttpConnection(httpClient, hostConfiguration, httpPool);
            Engine.logBeans.warn("(HttpConnector) HttpClient: connection error to " + sUrl + ": "
                    + e.getMessage() + "; retrying method");
            statuscode = httpClient.executeMethod(hostConfiguration, method, httpState);
            Engine.logBeans.debug("(HttpConnector) HttpClient: end of method successful");
        } catch (IOException ee) {
            throw new ConnectionException("Connection error to " + sUrl, ee);
        }
    } catch (OAuthException eee) {
        throw new ConnectionException("OAuth Connection error to " + sUrl, eee);
    }
    return statuscode;
}
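The catch block above deliberately discards e.getMessage() (typically just "Read timed out") and reports the configured timeout value instead, which is more actionable for the user. A sketch of that translation pattern, with a hypothetical stand-in for Convertigo's ConnectionException and a simulated failure in place of the real HTTP call:

import java.net.SocketTimeoutException;

// Hypothetical domain exception, standing in for Convertigo's ConnectionException
class ConnectionException extends Exception {
    ConnectionException(String message) { super(message); }
}

public class TimeoutTranslation {
    static int executeWithTimeout(int responseTimeoutSec) throws ConnectionException {
        try {
            // ... perform the HTTP call with SO_TIMEOUT = responseTimeoutSec * 1000 ...
            throw new SocketTimeoutException("Read timed out"); // simulate the failure
        } catch (SocketTimeoutException e) {
            // Replace the JDK's terse message with the value the user actually configured
            throw new ConnectionException("Timeout reached (" + responseTimeoutSec + " sec)");
        }
    }

    public static void main(String[] args) {
        try {
            executeWithTimeout(60);
        } catch (ConnectionException e) {
            System.err.println(e.getMessage()); // "Timeout reached (60 sec)"
        }
    }
}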
From source file: org.apache.manifoldcf.crawler.connectors.hdfs.HDFSRepositoryConnector.java

/**
 * Process a set of documents.
 * This is the method that should cause each document to be fetched, processed, and the results either added
 * to the queue of documents for the current job, and/or entered into the incremental ingestion manager.
 * The document specification allows this class to filter what is done based on the job.
 * The connector will be connected before this method can be called.
 *
 * @param documentIdentifiers is the set of document identifiers to process.
 * @param statuses are the currently-stored document versions for each document in the set of document
 *        identifiers passed in above.
 * @param activities is the interface this method should use to queue up new document references
 *        and ingest documents.
 * @param jobMode is an integer describing how the job is being run, whether continuous or once-only.
 * @param usesDefaultAuthority will be true only if the authority in use for these documents is the default one.
 */
@Override
public void processDocuments(String[] documentIdentifiers, IExistingVersions statuses, Specification spec,
        IProcessActivity activities, int jobMode, boolean usesDefaultAuthority)
        throws ManifoldCFException, ServiceInterruption {
    for (String documentIdentifier : documentIdentifiers) {
        String versionString;
        FileStatus fileStatus = getObject(new Path(documentIdentifier));
        if (fileStatus != null) {
            boolean isDirectory = fileStatus.isDirectory();
            if (isDirectory) {
                // If HDFS directory modify dates are transitive, as they are on Unix,
                // then getting the modify date of the current version is sufficient
                // to detect any downstream changes we need to be aware of.
                // (If this turns out to be a bad assumption, this should simply set rval[i] = "".)
                long lastModified = fileStatus.getModificationTime();
                versionString = new Long(lastModified).toString();
                if (activities.checkDocumentNeedsReindexing(documentIdentifier, versionString)) {
                    // Process directory!
                    String entityReference = documentIdentifier;
                    FileStatus[] fileStatuses = getChildren(fileStatus.getPath());
                    if (fileStatuses == null) {
                        continue;
                    }
                    for (int j = 0; j < fileStatuses.length; j++) {
                        // Note: the original read fileStatuses[j++], which double-incremented
                        // the counter and skipped every other child entry.
                        FileStatus fs = fileStatuses[j];
                        String canonicalPath = fs.getPath().toString();
                        if (checkInclude(session.getUri().toString(), fs, canonicalPath, spec)) {
                            activities.addDocumentReference(canonicalPath, documentIdentifier, RELATIONSHIP_CHILD);
                        }
                    }
                }
            } else {
                long lastModified = fileStatus.getModificationTime();
                StringBuilder sb = new StringBuilder();
                // Check if the path is to be converted. We record that info in the version string
                // so that we'll reindex documents whose URIs change.
                String nameNode = nameNodeProtocol + "://" + nameNodeHost + ":" + nameNodePort;
                String convertPath = findConvertPath(nameNode, spec, fileStatus.getPath());
                if (convertPath != null) {
                    // Record the path.
                    sb.append("+");
                    pack(sb, convertPath, '+');
                } else
                    sb.append("-");
                sb.append(new Long(lastModified).toString());
                versionString = sb.toString();

                // We will record the document fetch as an activity
                long startTime = System.currentTimeMillis();
                String errorCode = null;
                String errorDesc = null;
                long fileSize = 0;

                if (activities.checkDocumentNeedsReindexing(documentIdentifier, versionString)) {
                    // Process file!
                    if (!checkIngest(session.getUri().toString(), fileStatus, spec)) {
                        activities.noDocument(documentIdentifier, versionString);
                        continue;
                    }

                    // It is a file to be indexed.
                    long fileLength = fileStatus.getLen();
                    String fileName = fileStatus.getPath().getName();
                    String mimeType = mapExtensionToMimeType(fileStatus.getPath().getName());
                    Date modifiedDate = new Date(fileStatus.getModificationTime());
                    try {
                        String uri;
                        if (convertPath != null) {
                            uri = convertToWGETURI(convertPath);
                        } else {
                            uri = fileStatus.getPath().toUri().toString();
                        }

                        if (!activities.checkLengthIndexable(fileLength)) {
                            errorCode = activities.EXCLUDED_LENGTH;
                            errorDesc = "Excluding document because of file length ('" + fileLength + "')";
                            activities.noDocument(documentIdentifier, versionString);
                            continue;
                        }
                        if (!activities.checkURLIndexable(uri)) {
                            errorCode = activities.EXCLUDED_URL;
                            errorDesc = "Excluding document because of URL ('" + uri + "')";
                            activities.noDocument(documentIdentifier, versionString);
                            continue;
                        }
                        if (!activities.checkMimeTypeIndexable(mimeType)) {
                            errorCode = activities.EXCLUDED_MIMETYPE;
                            errorDesc = "Excluding document because of mime type (" + mimeType + ")";
                            activities.noDocument(documentIdentifier, versionString);
                            continue;
                        }
                        if (!activities.checkDateIndexable(modifiedDate)) {
                            errorCode = activities.EXCLUDED_DATE;
                            errorDesc = "Excluding document because of date (" + modifiedDate + ")";
                            activities.noDocument(documentIdentifier, versionString);
                            continue;
                        }

                        // Prepare the metadata part of RepositoryDocument
                        RepositoryDocument data = new RepositoryDocument();
                        data.setFileName(fileName);
                        data.setMimeType(mimeType);
                        data.setModifiedDate(modifiedDate);
                        data.addField("uri", uri);

                        BackgroundStreamThread t = new BackgroundStreamThread(getSession(), new Path(documentIdentifier));
                        try {
                            t.start();
                            boolean wasInterrupted = false;
                            try {
                                InputStream is = t.getSafeInputStream();
                                try {
                                    // Note: fileSize is still 0 at this point; the real length is
                                    // recorded only after a successful ingest (see below).
                                    data.setBinary(is, fileSize);
                                    activities.ingestDocumentWithException(documentIdentifier, versionString, uri, data);
                                } finally {
                                    is.close();
                                }
                            } catch (java.net.SocketTimeoutException e) {
                                throw e;
                            } catch (InterruptedIOException e) {
                                wasInterrupted = true;
                                throw e;
                            } catch (ManifoldCFException e) {
                                if (e.getErrorCode() == ManifoldCFException.INTERRUPTED) {
                                    wasInterrupted = true;
                                }
                                throw e;
                            } finally {
                                if (!wasInterrupted) {
                                    // This does a join
                                    t.finishUp();
                                }
                            }

                            // No errors. Record the fact that we made it.
                            errorCode = "OK";
                            // Length we did in bytes
                            fileSize = fileStatus.getLen();
                        } catch (InterruptedException e) {
                            // We were interrupted out of the join, most likely. Before we abandon the thread,
                            // send a courtesy interrupt.
                            t.interrupt();
                            throw new ManifoldCFException("Interrupted: " + e.getMessage(), e,
                                    ManifoldCFException.INTERRUPTED);
                        } catch (java.net.SocketTimeoutException e) {
                            errorCode = "IOERROR";
                            errorDesc = e.getMessage();
                            handleIOException(e);
                        } catch (InterruptedIOException e) {
                            t.interrupt();
                            throw new ManifoldCFException("Interrupted: " + e.getMessage(), e,
                                    ManifoldCFException.INTERRUPTED);
                        } catch (IOException e) {
                            errorCode = "IOERROR";
                            errorDesc = e.getMessage();
                            handleIOException(e);
                        }
                    } finally {
                        if (errorCode != null) {
                            activities.recordActivity(new Long(startTime), ACTIVITY_READ, new Long(fileSize),
                                    documentIdentifier, errorCode, errorDesc, null);
                        }
                    }
                }
            }
        } else {
            activities.deleteDocument(documentIdentifier);
            continue;
        }
    }
}
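The connector catches SocketTimeoutException separately from plain IOException so that e.getMessage() can be recorded as the activity's error description. A reduced sketch of that record-then-report flow; openDocument and the log format are illustrative stand-ins, not ManifoldCF APIs:

import java.io.IOException;
import java.io.InputStream;
import java.net.SocketTimeoutException;

public class FetchActivity {
    // Stand-in for the connector's HDFS stream; may block and time out
    static InputStream openDocument(String id) throws IOException {
        throw new SocketTimeoutException("Read timed out"); // simulated slow datanode
    }

    static void fetchAndRecord(String documentIdentifier) {
        String errorCode = null;
        String errorDesc = null;
        try (InputStream is = openDocument(documentIdentifier)) {
            is.read();
            errorCode = "OK";
        } catch (SocketTimeoutException e) {
            // Timeouts and other I/O failures share a code; keep the JDK's message for diagnostics
            errorCode = "IOERROR";
            errorDesc = e.getMessage();
        } catch (IOException e) {
            errorCode = "IOERROR";
            errorDesc = e.getMessage();
        } finally {
            if (errorCode != null) {
                System.out.printf("READ %s -> %s%s%n", documentIdentifier, errorCode,
                        errorDesc != null ? " (" + errorDesc + ")" : "");
            }
        }
    }

    public static void main(String[] args) {
        fetchAndRecord("/data/sample.txt"); // prints: READ /data/sample.txt -> IOERROR (Read timed out)
    }
}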
From source file: com.jiangge.apns4j.impl.ApnsConnectionImpl.java

private void startErrorWorker() {
    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            Socket curSocket = socket;
            try {
                if (!isSocketAlive(curSocket)) {
                    return;
                }
                InputStream socketIs = curSocket.getInputStream();
                byte[] res = new byte[ERROR_RESPONSE_BYTES_LENGTH];
                int size = 0;

                while (true) {
                    try {
                        size = socketIs.read(res);
                        if (size > 0 || size == -1) {
                            // break when something was read or there is no data any more
                            break;
                        }
                    } catch (SocketTimeoutException e) {
                        // There is no data. Keep reading.
                    }
                }

                int command = res[0];
                // On an error response, close the socket and resend the cached notifications.
                if (size == res.length && command == Command.ERROR) {
                    int status = res[1];
                    int errorId = ApnsTools.parse4ByteInt(res[2], res[3], res[4], res[5]);
                    if (logger.isInfoEnabled()) {
                        logger.info(String.format("%s Received error response. status: %s, id: %s, error-desc: %s",
                                connName, status, errorId, ErrorResponse.desc(status)));
                    }

                    Queue<PushNotification> resentQueue = new LinkedList<PushNotification>();
                    synchronized (lock) {
                        boolean found = false;
                        errorHappendedLastConn = true;
                        while (!notificationCachedQueue.isEmpty()) {
                            PushNotification pn = notificationCachedQueue.poll();
                            if (pn.getId() == errorId) {
                                found = true;
                            } else {
                                /**
                                 * https://developer.apple.com/library/ios/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/Chapters/CommunicatingWIthAPS.html
                                 * As the document says, add the notifications which need to be
                                 * resent to the queue. Ignore the erroneous one.
                                 */
                                if (found) {
                                    resentQueue.add(pn);
                                }
                            }
                        }
                        if (!found) {
                            logger.warn(connName
                                    + " Didn't find error-notification in the queue. Maybe it's time to adjust cache length. id: "
                                    + errorId);
                        }
                    }
                    // resend notifications
                    if (!resentQueue.isEmpty()) {
                        ApnsResender.getInstance().resend(name, resentQueue);
                    }
                } else {
                    // ignore and continue reading
                    logger.error(connName + " Unexpected command or size. command: " + command + " , size: " + size);
                }
            } catch (Exception e) {
                // logger.error(connName + " " + e.getMessage(), e);
                logger.error(connName + " " + e.getMessage());
            } finally {
                // Close the old socket, although it may have been closed once before.
                closeSocket(curSocket);
            }
        }
    });
    thread.start();
}
From source file: com.dbay.apns4j.impl.ApnsConnectionImpl.java

private void startErrorWorker() {
    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            Socket curSocket = socket;
            try {
                if (!isSocketAlive(curSocket)) {
                    return;
                }
                InputStream socketIs = curSocket.getInputStream();
                byte[] res = new byte[ERROR_RESPONSE_BYTES_LENGTH];
                int size = 0;

                while (true) {
                    try {
                        size = socketIs.read(res);
                        if (size > 0 || size == -1) {
                            // break when something was read or there is no data any more
                            break;
                        }
                    } catch (SocketTimeoutException e) {
                        // There is no data. Keep reading.
                        Thread.sleep(10);
                    }
                }

                int command = res[0];
                // On an error response, close the socket and resend the cached notifications.
                if (size == res.length && command == Command.ERROR) {
                    int status = res[1];
                    int errorId = ApnsTools.parse4ByteInt(res[2], res[3], res[4], res[5]);
                    String token = ErrorResponse.desc(status);

                    // notify the error callback, if one is registered
                    if (null != errorProcessHandler) {
                        errorProcessHandler.process(errorId, status, token);
                    }

                    if (logger.isInfoEnabled()) {
                        logger.info(String.format("%s, %s Received error response. status: %s, id: %s, error-desc: %s",
                                serviceName, connName, status, errorId, token));
                    }

                    Queue<PushNotification> resentQueue = new LinkedList<PushNotification>();
                    synchronized (lock) {
                        boolean found = false;
                        errorHappendedLastConn = true;
                        while (!notificationCachedQueue.isEmpty()) {
                            PushNotification pn = notificationCachedQueue.poll();
                            if (pn.getId() == errorId) {
                                found = true;
                            } else {
                                /**
                                 * https://developer.apple.com/library/ios/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/Chapters/CommunicatingWIthAPS.html
                                 * As the document says, add the notifications which need to be
                                 * resent to the queue. Ignore the erroneous one.
                                 */
                                if (found) {
                                    resentQueue.add(pn);
                                }
                            }
                        }
                        if (!found) {
                            logger.warn(connName
                                    + " Didn't find error-notification in the queue. Maybe it's time to adjust cache length. id: "
                                    + errorId);
                        }
                    }
                    // resend notifications
                    if (!resentQueue.isEmpty()) {
                        ApnsResender.getInstance().resend(name, resentQueue);
                    }
                } else {
                    // ignore and continue reading
                    logger.error(connName + " Unexpected command or size. command: " + command + " , size: " + size);
                }
            } catch (Exception e) {
                // logger.error(connName + " " + e.getMessage(), e);
                logger.error(connName + " " + e.getMessage());
            } finally {
                // Close the old socket, although it may have been closed once before.
                closeSocket(curSocket);
            }
        }
    });
    thread.start();
}
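Both APNs connection classes (near-identical forks of the same code) rely on the same trick: set SO_TIMEOUT on the socket and treat every SocketTimeoutException as "no data yet", so the error-reader thread keeps polling without blocking forever. A stripped-down sketch of that reader loop, with illustrative names; the real classes obtain the socket from their TLS connection to the APNs gateway:

import java.io.IOException;
import java.io.InputStream;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class ErrorResponseReader {
    private static final int ERROR_RESPONSE_BYTES_LENGTH = 6; // APNs binary error frame: command, status, 4-byte id

    /** Block until a full error frame arrives or the stream ends; timeouts just mean "keep waiting". */
    static int readErrorFrame(Socket socket, byte[] frame) throws IOException {
        socket.setSoTimeout(1000); // wake up once per second instead of blocking indefinitely
        InputStream in = socket.getInputStream();
        while (true) {
            try {
                int size = in.read(frame);
                if (size > 0 || size == -1) {
                    return size; // something was read, or the peer closed the connection
                }
            } catch (SocketTimeoutException e) {
                // No data within SO_TIMEOUT; this is expected here, not an error. Keep reading.
            }
        }
    }

    public static void main(String[] args) {
        // Usage sketch: 'socket' would be the TLS connection to the APNs gateway.
        // byte[] frame = new byte[ERROR_RESPONSE_BYTES_LENGTH];
        // int size = readErrorFrame(socket, frame);
    }
}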