List of usage examples for the java.io.InterruptedIOException(String) constructor
public InterruptedIOException(String s)

Constructs an InterruptedIOException with the specified detail message.
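Before the project examples, here is a minimal, self-contained sketch of typical usage (class and method names are illustrative, not taken from any project below). Because this constructor accepts only a detail message and no cause, callers that convert an InterruptedException commonly attach it afterwards with initCause(), a pattern several of the examples below follow.

    import java.io.IOException;
    import java.io.InterruptedIOException;

    public class InterruptedIOExample {

        // Hypothetical helper: stands in for a blocking I/O wait and converts
        // thread interruption into an InterruptedIOException.
        static void awaitTransfer() throws IOException {
            try {
                Thread.sleep(1000); // stand-in for blocking I/O
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
                InterruptedIOException iioe =
                        new InterruptedIOException("Transfer interrupted before completion");
                iioe.initCause(e); // attach the original cause
                throw iioe;
            }
        }

        public static void main(String[] args) {
            try {
                awaitTransfer();
            } catch (IOException e) {
                System.err.println("I/O failed: " + e);
            }
        }
    }

Restoring the thread's interrupt flag before throwing, as above, lets upstream code still observe the interruption.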
From source file: com.splicemachine.compactions.SpliceDefaultCompactor.java

    public List<Path> sparkCompact(CompactionRequest request) throws IOException {
        if (LOG.isTraceEnabled())
            SpliceLogUtils.trace(LOG, "sparkCompact(): CompactionRequest=%s", request);

        FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles());
        this.progress = new CompactionProgress(fd.maxKeyCount);

        // Find the smallest read point across all the Scanners.
        long smallestReadPoint = getSmallestReadPoint();

        List<StoreFileScanner> scanners;
        Collection<StoreFile> readersToClose;
        // Tell HDFS it can drop data out of the caches after reading them, we are compacting
        // on Spark and won't need that data anytime soon
        final boolean dropBehind = true;
        if (this.conf.getBoolean("hbase.regionserver.compaction.private.readers", false)) {
            // clone all StoreFiles, so we'll do the compaction on a independent copy of StoreFiles,
            // HFileFiles, and their readers
            readersToClose = new ArrayList<>(request.getFiles().size());
            for (StoreFile f : request.getFiles()) {
                readersToClose.add(new StoreFile(f));
            }
            scanners = createFileScanners(readersToClose, smallestReadPoint, dropBehind);
        } else {
            readersToClose = Collections.emptyList();
            scanners = createFileScanners(request.getFiles(), smallestReadPoint, dropBehind);
        }

        StoreFile.Writer writer = null;
        List<Path> newFiles = new ArrayList<>();
        boolean cleanSeqId = false;
        IOException e = null;
        try {
            InternalScanner scanner = null;
            try {
                /* Include deletes, unless we are doing a compaction of all files */
                ScanType scanType = request.isRetainDeleteMarkers() ? ScanType.COMPACT_RETAIN_DELETES
                        : ScanType.COMPACT_DROP_DELETES;
                scanner = preCreateCoprocScanner(request, scanType, fd.earliestPutTs, scanners);
                if (scanner == null) {
                    scanner = createScanner(store, scanners, scanType, smallestReadPoint, fd.earliestPutTs);
                }
                if (needsSI(store.getTableName())) {
                    SIDriver driver = SIDriver.driver();
                    SICompactionState state = new SICompactionState(driver.getTxnSupplier(),
                            driver.getRollForward(), driver.getConfiguration().getActiveTransactionCacheSize());
                    scanner = new SICompactionScanner(state, scanner);
                }
                if (scanner == null) {
                    // NULL scanner returned from coprocessor hooks means skip normal processing.
                    return newFiles;
                }
                // Create the writer even if no kv (Empty store file is also ok),
                // because we need record the max seq id for the store file, see HBASE-6059
                if (fd.minSeqIdToKeep > 0) {
                    smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint);
                    cleanSeqId = true;
                }
                writer = createTmpWriter(fd, dropBehind);
                boolean finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId,
                        new NoLimitCompactionThroughputController(), request.isAllFiles());
                if (!finished) {
                    writer.close();
                    store.getFileSystem().delete(writer.getPath(), false);
                    writer = null;
                    throw new InterruptedIOException("Aborting compaction of store " + store + " in region "
                            + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted.");
                }
            } finally {
                if (scanner != null) {
                    scanner.close();
                }
            }
        } catch (IOException ioe) {
            e = ioe;
            throw ioe;
        } finally {
            try {
                if (writer != null) {
                    if (e != null) {
                        writer.close();
                    } else {
                        writer.appendMetadata(fd.maxSeqId, request.isAllFiles());
                        writer.close();
                        newFiles.add(writer.getPath());
                    }
                }
            } finally {
                for (StoreFile f : readersToClose) {
                    try {
                        f.closeReader(true);
                    } catch (IOException ioe) {
                        LOG.warn("Exception closing " + f, ioe);
                    }
                }
            }
        }
        return newFiles;
    }
From source file: org.apache.hadoop.hbase.replication.regionserver.HFileReplicator.java

    private Map<String, Path> copyHFilesToStagingDir() throws IOException {
        Map<String, Path> mapOfCopiedHFiles = new HashMap<String, Path>();
        Pair<byte[], List<String>> familyHFilePathsPair;
        List<String> hfilePaths;
        byte[] family;
        Path familyStagingDir;
        int familyHFilePathsPairsListSize;
        int totalNoOfHFiles;
        List<Pair<byte[], List<String>>> familyHFilePathsPairsList;
        FileSystem sourceFs = null;

        try {
            Path sourceClusterPath = new Path(sourceBaseNamespaceDirPath);
            /*
             * Path#getFileSystem will by default get the FS from cache. If both source and sink cluster
             * has same FS name service then it will return peer cluster FS. To avoid this we explicitly
             * disable the loading of FS from cache, so that a new FS is created with source cluster
             * configuration.
             */
            String sourceScheme = sourceClusterPath.toUri().getScheme();
            String disableCacheName = String.format("fs.%s.impl.disable.cache", new Object[] { sourceScheme });
            sourceClusterConf.setBoolean(disableCacheName, true);

            sourceFs = sourceClusterPath.getFileSystem(sourceClusterConf);

            User user = userProvider.getCurrent();
            // For each table name in the map
            for (Entry<String, List<Pair<byte[], List<String>>>> tableEntry : bulkLoadHFileMap.entrySet()) {
                String tableName = tableEntry.getKey();

                // Create staging directory for each table
                Path stagingDir = createStagingDir(new Path(hbaseStagingDir), user, TableName.valueOf(tableName));

                familyHFilePathsPairsList = tableEntry.getValue();
                familyHFilePathsPairsListSize = familyHFilePathsPairsList.size();

                // For each list of family hfile paths pair in the table
                for (int i = 0; i < familyHFilePathsPairsListSize; i++) {
                    familyHFilePathsPair = familyHFilePathsPairsList.get(i);

                    family = familyHFilePathsPair.getFirst();
                    hfilePaths = familyHFilePathsPair.getSecond();

                    familyStagingDir = new Path(stagingDir, Bytes.toString(family));
                    totalNoOfHFiles = hfilePaths.size();

                    // For each list of hfile paths for the family
                    List<Future<Void>> futures = new ArrayList<Future<Void>>();
                    Callable<Void> c;
                    Future<Void> future;
                    int currentCopied = 0;
                    // Copy the hfiles parallely
                    while (totalNoOfHFiles > currentCopied + this.copiesPerThread) {
                        c = new Copier(sourceFs, familyStagingDir,
                                hfilePaths.subList(currentCopied, currentCopied + this.copiesPerThread));
                        future = exec.submit(c);
                        futures.add(future);
                        currentCopied += this.copiesPerThread;
                    }

                    int remaining = totalNoOfHFiles - currentCopied;
                    if (remaining > 0) {
                        c = new Copier(sourceFs, familyStagingDir,
                                hfilePaths.subList(currentCopied, currentCopied + remaining));
                        future = exec.submit(c);
                        futures.add(future);
                    }

                    for (Future<Void> f : futures) {
                        try {
                            f.get();
                        } catch (InterruptedException e) {
                            InterruptedIOException iioe = new InterruptedIOException(
                                    "Failed to copy HFiles to local file system. This will be retried again "
                                            + "by the source cluster.");
                            iioe.initCause(e);
                            throw iioe;
                        } catch (ExecutionException e) {
                            throw new IOException("Failed to copy HFiles to local file system. This will "
                                    + "be retried again by the source cluster.", e);
                        }
                    }
                }
                // Add the staging directory to this table. Staging directory contains all the hfiles
                // belonging to this table
                mapOfCopiedHFiles.put(tableName, stagingDir);
            }
            return mapOfCopiedHFiles;
        } finally {
            if (sourceFs != null) {
                sourceFs.close();
            }
            if (exec != null) {
                exec.shutdown();
            }
        }
    }
From source file: org.apache.hadoop.hbase.util.FSHDFSUtils.java

    void checkIfCancelled(final CancelableProgressable reporter) throws InterruptedIOException {
        if (reporter == null)
            return;
        if (!reporter.progress())
            throw new InterruptedIOException("Operation cancelled");
    }
From source file: com.asakusafw.runtime.util.hadoop.ConfigurationProvider.java

    private static File detectHadoopConfigurationDirectory(File command, File temporary, Map<String, String> envp)
            throws IOException {
        assert command != null;
        assert temporary != null;
        assert envp != null;
        prepareClasspath(temporary, ConfigurationDetecter.class);
        File resultOutput = new File(temporary, PATH_SUBPROC_OUTPUT);

        List<String> arguments = new ArrayList<>();
        arguments.add(command.getAbsolutePath());
        arguments.add(ConfigurationDetecter.class.getName());
        arguments.add(resultOutput.getAbsolutePath());

        ProcessBuilder processBuilder = new ProcessBuilder(arguments);
        processBuilder.environment().clear();
        processBuilder.environment().putAll(envp);
        processBuilder.environment().put(ENV_HADOOP_CLASSPATH, temporary.getPath());

        Process process = processBuilder.start();
        try {
            Thread redirectOut = redirect(process.getInputStream(), System.out);
            Thread redirectErr = redirect(process.getErrorStream(), System.err);
            try {
                int exit = process.waitFor();
                redirectOut.join();
                redirectErr.join();
                if (exit != 0) {
                    throw new IOException(MessageFormat.format(
                            "Failed to execute Hadoop command (exitcode={1}): {0}",
                            arguments, String.valueOf(exit)));
                }
            } catch (InterruptedException e) {
                throw (IOException) new InterruptedIOException(
                        MessageFormat.format("Failed to execute Hadoop command (interrupted): {0}", arguments))
                        .initCause(e);
            }
        } finally {
            process.destroy();
        }
        if (resultOutput.isFile() == false) {
            throw new IOException(
                    MessageFormat.format("Failed to restore Hadoop configuration path: {0}", resultOutput));
        }
        File path = ConfigurationDetecter.read(resultOutput);
        return path;
    }
From source file: org.apache.hadoop.hbase.wal.WALFactory.java

    public Reader createReader(final FileSystem fs, final Path path, CancelableProgressable reporter,
            boolean allowCustom) throws IOException {
        Class<? extends DefaultWALProvider.Reader> lrClass = allowCustom ? logReaderClass
                : ProtobufLogReader.class;
        try {
            // A wal file could be under recovery, so it may take several
            // tries to get it open. Instead of claiming it is corrupted, retry
            // to open it up to 5 minutes by default.
            long startWaiting = EnvironmentEdgeManager.currentTime();
            long openTimeout = timeoutMillis + startWaiting;
            int nbAttempt = 0;
            FSDataInputStream stream = null;
            while (true) {
                try {
                    if (lrClass != ProtobufLogReader.class) {
                        // User is overriding the WAL reader, let them.
                        DefaultWALProvider.Reader reader = lrClass.newInstance();
                        reader.init(fs, path, conf, null);
                        return reader;
                    } else {
                        stream = fs.open(path);
                        // Note that zero-length file will fail to read PB magic, and attempt to create
                        // a non-PB reader and fail the same way existing code expects it to. If we get
                        // rid of the old reader entirely, we need to handle 0-size files differently from
                        // merely non-PB files.
                        byte[] magic = new byte[ProtobufLogReader.PB_WAL_MAGIC.length];
                        boolean isPbWal = (stream.read(magic) == magic.length)
                                && Arrays.equals(magic, ProtobufLogReader.PB_WAL_MAGIC);
                        DefaultWALProvider.Reader reader = isPbWal ? new ProtobufLogReader()
                                : new SequenceFileLogReader();
                        reader.init(fs, path, conf, stream);
                        return reader;
                    }
                } catch (IOException e) {
                    try {
                        if (stream != null) {
                            stream.close();
                        }
                    } catch (IOException exception) {
                        LOG.warn("Could not close FSDataInputStream" + exception.getMessage());
                        LOG.debug("exception details", exception);
                    }
                    String msg = e.getMessage();
                    if (msg != null && (msg.contains("Cannot obtain block length")
                            || msg.contains("Could not obtain the last block")
                            || msg.matches("Blocklist for [^ ]* has changed.*"))) {
                        if (++nbAttempt == 1) {
                            LOG.warn("Lease should have recovered. This is not expected. Will retry", e);
                        }
                        if (reporter != null && !reporter.progress()) {
                            throw new InterruptedIOException("Operation is cancelled");
                        }
                        if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) {
                            LOG.error("Can't open after " + nbAttempt + " attempts and "
                                    + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms "
                                    + " for " + path);
                        } else {
                            try {
                                Thread.sleep(nbAttempt < 3 ? 500 : 1000);
                                continue; // retry
                            } catch (InterruptedException ie) {
                                InterruptedIOException iioe = new InterruptedIOException();
                                iioe.initCause(ie);
                                throw iioe;
                            }
                        }
                    }
                    throw e;
                }
            }
        } catch (IOException ie) {
            throw ie;
        } catch (Exception e) {
            throw new IOException("Cannot get log reader", e);
        }
    }
From source file: org.apache.tajo.webapp.HttpServer.java

    /**
     * Start the server. Does not wait for the server to start.
     */
    public void start() throws IOException {
        try {
            if (listenerStartedExternally) { // Expect that listener was started
                                             // securely
                if (listener.getLocalPort() == -1) // ... and verify
                    throw new Exception("Exepected webserver's listener to be started "
                            + "previously but wasn't");
                // And skip all the port rolling issues.
                webServer.start();
            } else {
                int port;
                int oriPort = listener.getPort(); // The original requested port
                while (true) {
                    try {
                        port = webServer.getConnectors()[0].getLocalPort();
                        LOG.debug("Port returned by webServer.getConnectors()[0]."
                                + "getLocalPort() before open() is " + port
                                + ". Opening the listener on " + oriPort);
                        listener.open();
                        port = listener.getLocalPort();
                        LOG.debug("listener.getLocalPort() returned " + listener.getLocalPort()
                                + " webServer.getConnectors()[0].getLocalPort() returned "
                                + webServer.getConnectors()[0].getLocalPort());
                        // Workaround to handle the problem reported in HADOOP-4744
                        if (port < 0) {
                            Thread.sleep(100);
                            int numRetries = 1;
                            while (port < 0) {
                                LOG.warn("listener.getLocalPort returned " + port);
                                if (numRetries++ > MAX_RETRIES) {
                                    throw new Exception(" listener.getLocalPort is returning "
                                            + "less than 0 even after " + numRetries + " resets");
                                }
                                for (int i = 0; i < 2; i++) {
                                    LOG.info("Retrying listener.getLocalPort()");
                                    port = listener.getLocalPort();
                                    if (port > 0) {
                                        break;
                                    }
                                    Thread.sleep(200);
                                }
                                if (port > 0) {
                                    break;
                                }
                                LOG.info("Bouncing the listener");
                                listener.close();
                                Thread.sleep(1000);
                                listener.setPort(oriPort == 0 ? 0 : (oriPort += 1));
                                listener.open();
                                Thread.sleep(100);
                                port = listener.getLocalPort();
                            }
                        } // Workaround end
                        LOG.info("Jetty bound to port " + port);
                        webServer.start();
                        break;
                    } catch (IOException ex) {
                        // if this is a bind exception,
                        // then try the next port number.
                        if (ex instanceof BindException) {
                            if (!findPort) {
                                BindException be = new BindException(
                                        "Port in use: " + listener.getHost() + ":" + listener.getPort());
                                be.initCause(ex);
                                throw be;
                            }
                        } else {
                            LOG.info("HttpServer.start() threw a non Bind IOException");
                            throw ex;
                        }
                    } catch (MultiException ex) {
                        LOG.info("HttpServer.start() threw a MultiException");
                        throw ex;
                    }
                    listener.setPort((oriPort += 1));
                }
            }
            // Make sure there is no handler failures.
            Handler[] handlers = webServer.getHandlers();
            for (Handler handler : handlers) {
                if (handler.isFailed()) {
                    throw new IOException("Problem in starting http server. Server handlers failed");
                }
            }
        } catch (IOException e) {
            throw e;
        } catch (InterruptedException e) {
            throw (IOException) new InterruptedIOException("Interrupted while starting HTTP server")
                    .initCause(e);
        } catch (Exception e) {
            throw new IOException("Problem starting http server", e);
        }
    }
From source file: org.apache.hadoop.hbase.regionserver.RSRpcServices.java

    /**
     * Starts the nonce operation for a mutation, if needed.
     * @param mutation Mutation.
     * @param nonceGroup Nonce group from the request.
     * @return Nonce used (can be NO_NONCE).
     */
    private long startNonceOperation(final MutationProto mutation, long nonceGroup)
            throws IOException, OperationConflictException {
        if (regionServer.nonceManager == null || !mutation.hasNonce())
            return HConstants.NO_NONCE;
        boolean canProceed = false;
        try {
            canProceed = regionServer.nonceManager.startOperation(nonceGroup, mutation.getNonce(),
                    regionServer);
        } catch (InterruptedException ex) {
            throw new InterruptedIOException("Nonce start operation interrupted");
        }
        if (!canProceed) {
            // TODO: instead, we could convert append/increment to get w/mvcc
            String message = "The operation with nonce {" + nonceGroup + ", " + mutation.getNonce()
                    + "} on row [" + Bytes.toString(mutation.getRow().toByteArray())
                    + "] may have already completed";
            throw new OperationConflictException(message);
        }
        return mutation.getNonce();
    }
From source file: org.apache.http.HC4.impl.execchain.MainClientExec.java

    @Override
    public CloseableHttpResponse execute(final HttpRoute route, final HttpRequestWrapper request,
            final HttpClientContext context, final HttpExecutionAware execAware)
            throws IOException, HttpException {
        Args.notNull(route, "HTTP route");
        Args.notNull(request, "HTTP request");
        Args.notNull(context, "HTTP context");

        AuthState targetAuthState = context.getTargetAuthState();
        if (targetAuthState == null) {
            targetAuthState = new AuthState();
            context.setAttribute(HttpClientContext.TARGET_AUTH_STATE, targetAuthState);
        }
        AuthState proxyAuthState = context.getProxyAuthState();
        if (proxyAuthState == null) {
            proxyAuthState = new AuthState();
            context.setAttribute(HttpClientContext.PROXY_AUTH_STATE, proxyAuthState);
        }

        if (request instanceof HttpEntityEnclosingRequest) {
            RequestEntityProxy.enhance((HttpEntityEnclosingRequest) request);
        }

        Object userToken = context.getUserToken();

        final ConnectionRequest connRequest = connManager.requestConnection(route, userToken);
        if (execAware != null) {
            if (execAware.isAborted()) {
                connRequest.cancel();
                throw new RequestAbortedException("Request aborted");
            } else {
                execAware.setCancellable(connRequest);
            }
        }

        final RequestConfig config = context.getRequestConfig();

        final HttpClientConnection managedConn;
        try {
            final int timeout = config.getConnectionRequestTimeout();
            managedConn = connRequest.get(timeout > 0 ? timeout : 0, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException interrupted) {
            Thread.currentThread().interrupt();
            throw new RequestAbortedException("Request aborted", interrupted);
        } catch (final ExecutionException ex) {
            Throwable cause = ex.getCause();
            if (cause == null) {
                cause = ex;
            }
            throw new RequestAbortedException("Request execution failed", cause);
        }

        context.setAttribute(HttpCoreContext.HTTP_CONNECTION, managedConn);

        if (config.isStaleConnectionCheckEnabled()) {
            // validate connection
            if (managedConn.isOpen()) {
                this.log.debug("Stale connection check");
                if (managedConn.isStale()) {
                    this.log.debug("Stale connection detected");
                    managedConn.close();
                }
            }
        }

        final ConnectionHolder connHolder = new ConnectionHolder(this.log, this.connManager, managedConn);
        try {
            if (execAware != null) {
                execAware.setCancellable(connHolder);
            }

            HttpResponse response;
            for (int execCount = 1;; execCount++) {

                if (execCount > 1 && !RequestEntityProxy.isRepeatable(request)) {
                    throw new NonRepeatableRequestException("Cannot retry request "
                            + "with a non-repeatable request entity.");
                }

                if (execAware != null && execAware.isAborted()) {
                    throw new RequestAbortedException("Request aborted");
                }

                if (!managedConn.isOpen()) {
                    this.log.debug("Opening connection " + route);
                    try {
                        establishRoute(proxyAuthState, managedConn, route, request, context);
                    } catch (final TunnelRefusedException ex) {
                        if (this.log.isDebugEnabled()) {
                            this.log.debug(ex.getMessage());
                        }
                        response = ex.getResponse();
                        break;
                    }
                }
                final int timeout = config.getSocketTimeout();
                if (timeout >= 0) {
                    managedConn.setSocketTimeout(timeout);
                }

                if (execAware != null && execAware.isAborted()) {
                    throw new RequestAbortedException("Request aborted");
                }

                if (this.log.isDebugEnabled()) {
                    this.log.debug("Executing request " + request.getRequestLine());
                }

                if (!request.containsHeader(AUTH.WWW_AUTH_RESP)) {
                    if (this.log.isDebugEnabled()) {
                        this.log.debug("Target auth state: " + targetAuthState.getState());
                    }
                    this.authenticator.generateAuthResponse(request, targetAuthState, context);
                }
                if (!request.containsHeader(AUTH.PROXY_AUTH_RESP) && !route.isTunnelled()) {
                    if (this.log.isDebugEnabled()) {
                        this.log.debug("Proxy auth state: " + proxyAuthState.getState());
                    }
                    this.authenticator.generateAuthResponse(request, proxyAuthState, context);
                }

                response = requestExecutor.execute(request, managedConn, context);

                // The connection is in or can be brought to a re-usable state.
                if (reuseStrategy.keepAlive(response, context)) {
                    // Set the idle duration of this connection
                    final long duration = keepAliveStrategy.getKeepAliveDuration(response, context);
                    if (this.log.isDebugEnabled()) {
                        final String s;
                        if (duration > 0) {
                            s = "for " + duration + " " + TimeUnit.MILLISECONDS;
                        } else {
                            s = "indefinitely";
                        }
                        this.log.debug("Connection can be kept alive " + s);
                    }
                    connHolder.setValidFor(duration, TimeUnit.MILLISECONDS);
                    connHolder.markReusable();
                } else {
                    connHolder.markNonReusable();
                }

                if (needAuthentication(targetAuthState, proxyAuthState, route, response, context)) {
                    // Make sure the response body is fully consumed, if present
                    final HttpEntity entity = response.getEntity();
                    if (connHolder.isReusable()) {
                        EntityUtils.consume(entity);
                    } else {
                        managedConn.close();
                        if (proxyAuthState.getState() == AuthProtocolState.SUCCESS
                                && proxyAuthState.getAuthScheme() != null
                                && proxyAuthState.getAuthScheme().isConnectionBased()) {
                            this.log.debug("Resetting proxy auth state");
                            proxyAuthState.reset();
                        }
                        if (targetAuthState.getState() == AuthProtocolState.SUCCESS
                                && targetAuthState.getAuthScheme() != null
                                && targetAuthState.getAuthScheme().isConnectionBased()) {
                            this.log.debug("Resetting target auth state");
                            targetAuthState.reset();
                        }
                    }
                    // discard previous auth headers
                    final HttpRequest original = request.getOriginal();
                    if (!original.containsHeader(AUTH.WWW_AUTH_RESP)) {
                        request.removeHeaders(AUTH.WWW_AUTH_RESP);
                    }
                    if (!original.containsHeader(AUTH.PROXY_AUTH_RESP)) {
                        request.removeHeaders(AUTH.PROXY_AUTH_RESP);
                    }
                } else {
                    break;
                }
            }

            if (userToken == null) {
                userToken = userTokenHandler.getUserToken(context);
                context.setAttribute(HttpClientContext.USER_TOKEN, userToken);
            }
            if (userToken != null) {
                connHolder.setState(userToken);
            }

            // check for entity, release connection if possible
            final HttpEntity entity = response.getEntity();
            if (entity == null || !entity.isStreaming()) {
                // connection not needed and (assumed to be) in re-usable state
                connHolder.releaseConnection();
                return new HttpResponseProxy(response, null);
            } else {
                return new HttpResponseProxy(response, connHolder);
            }
        } catch (final ConnectionShutdownException ex) {
            final InterruptedIOException ioex = new InterruptedIOException("Connection has been shut down");
            ioex.initCause(ex);
            throw ioex;
        } catch (final HttpException ex) {
            connHolder.abortConnection();
            throw ex;
        } catch (final IOException ex) {
            connHolder.abortConnection();
            throw ex;
        } catch (final RuntimeException ex) {
            connHolder.abortConnection();
            throw ex;
        }
    }