List of usage examples for java.io.InterruptedIOException.

public InterruptedIOException()

Constructs an InterruptedIOException with null as its error detail message.
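Before the full examples, here is a minimal, self-contained sketch of the idiom they all share (the class and method names here are illustrative, not taken from any of the quoted sources): catch InterruptedException, restore the thread's interrupt status, and rethrow as an InterruptedIOException so interruption can cross a method signature that only declares IOException.

import java.io.IOException;
import java.io.InterruptedIOException;

public class WrapInterruptDemo {
    // Illustrative helper: waits on a worker thread but exposes only IOException.
    static void awaitWorker(Thread worker) throws IOException {
        try {
            worker.join(); // blocking call that can throw InterruptedException
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag for callers upstream
            // initCause returns Throwable, hence the cast back to InterruptedIOException
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }

    public static void main(String[] args) throws IOException {
        Thread worker = new Thread(() -> { /* simulated I/O work */ });
        worker.start();
        awaitWorker(worker);
    }
}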
From source file: org.apache.hadoop.hbase.regionserver.SplitTransaction.java
/**
 * Perform time consuming opening of the daughter regions.
 * @param server Hosting server instance. Can be null when testing (won't try
 *   and update in zk if a null server)
 * @param services Used to online/offline regions.
 * @param a first daughter region
 * @param b second daughter region
 * @throws IOException If thrown, transaction failed.
 *   Call {@link #rollback(Server, RegionServerServices)}
 */
/* package */ void openDaughters(final Server server, final RegionServerServices services, HRegion a,
        HRegion b) throws IOException {
    boolean stopped = server != null && server.isStopped();
    boolean stopping = services != null && services.isStopping();
    // TODO: Is this check needed here?
    if (stopped || stopping) {
        LOG.info("Not opening daughters " + b.getRegionInfo().getRegionNameAsString() + " and "
                + a.getRegionInfo().getRegionNameAsString() + " because stopping=" + stopping
                + ", stopped=" + stopped);
    } else {
        // Open daughters in parallel.
        DaughterOpener aOpener = new DaughterOpener(server, a);
        DaughterOpener bOpener = new DaughterOpener(server, b);
        aOpener.start();
        bOpener.start();
        try {
            aOpener.join();
            bOpener.join();
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
        if (aOpener.getException() != null) {
            throw new IOException("Failed " + aOpener.getName(), aOpener.getException());
        }
        if (bOpener.getException() != null) {
            throw new IOException("Failed " + bOpener.getName(), bOpener.getException());
        }
        if (services != null) {
            try {
                // add 2nd daughter first (see HBASE-4335)
                services.postOpenDeployTasks(b, server.getCatalogTracker());
                // Should add it to OnlineRegions
                services.addToOnlineRegions(b);
                services.postOpenDeployTasks(a, server.getCatalogTracker());
                services.addToOnlineRegions(a);
            } catch (KeeperException ke) {
                throw new IOException(ke);
            }
        }
    }
}
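Note the single-expression rethrow in the catch block: Throwable.initCause returns Throwable, so the result has to be cast back to InterruptedIOException to satisfy the method's throws clause while still surfacing the more specific exception type at runtime. The same cast recurs in several of the examples below.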
From source file: org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.java
/**
 * Perform time consuming opening of the daughter regions.
 * @param server Hosting server instance. Can be null when testing
 * @param services Used to online/offline regions.
 * @param a first daughter region
 * @param b second daughter region
 * @throws IOException If thrown, transaction failed.
 *   Call {@link #rollback(Server, RegionServerServices)}
 */
@VisibleForTesting
void openDaughters(final Server server, final RegionServerServices services, Region a, Region b)
        throws IOException {
    boolean stopped = server != null && server.isStopped();
    boolean stopping = services != null && services.isStopping();
    // TODO: Is this check needed here?
    if (stopped || stopping) {
        LOG.info("Not opening daughters " + b.getRegionInfo().getRegionNameAsString() + " and "
                + a.getRegionInfo().getRegionNameAsString() + " because stopping=" + stopping
                + ", stopped=" + stopped);
    } else {
        // Open daughters in parallel.
        DaughterOpener aOpener = new DaughterOpener(server, a);
        DaughterOpener bOpener = new DaughterOpener(server, b);
        aOpener.start();
        bOpener.start();
        try {
            aOpener.join();
            if (aOpener.getException() == null) {
                transition(SplitTransactionPhase.OPENED_REGION_A);
            }
            bOpener.join();
            if (bOpener.getException() == null) {
                transition(SplitTransactionPhase.OPENED_REGION_B);
            }
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
        if (aOpener.getException() != null) {
            throw new IOException("Failed " + aOpener.getName(), aOpener.getException());
        }
        if (bOpener.getException() != null) {
            throw new IOException("Failed " + bOpener.getName(), bOpener.getException());
        }
        if (services != null) {
            if (!services.reportRegionStateTransition(TransitionCode.SPLIT, parent.getRegionInfo(), hri_a,
                    hri_b)) {
                throw new IOException("Failed to report split region to master: "
                        + parent.getRegionInfo().getShortNameToLog());
            }
            // Should add it to OnlineRegions
            services.addToOnlineRegions(b);
            services.addToOnlineRegions(a);
        }
    }
}
From source file: com.sa.npopa.samples.hbase.rest.client.RemoteHTable.java
@Override
public void delete(Delete delete) throws IOException {
    String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(), delete.getTimeStamp(),
            delete.getTimeStamp(), 1);
    for (int i = 0; i < maxRetries; i++) {
        Response response = client.delete(spec);
        int code = response.getCode();
        switch (code) {
        case 200:
            return;
        case 509:
            try {
                Thread.sleep(sleepTime);
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
            break;
        default:
            throw new IOException("delete request failed with " + code);
        }
    }
    throw new IOException("delete request timed out");
}
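Here the conversion sits inside a bounded retry loop: a 509 response triggers a sleep-and-retry, and an interrupt during the sleep aborts the whole delete as an InterruptedIOException. Unlike the HttpClient example below, this snippet does not call Thread.currentThread().interrupt() before rethrowing, so the thread's interrupt flag is left cleared.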
From source file: org.apache.http.impl.client.DefaultRequestDirector.java
public HttpResponse execute(final HttpHost targetHost, final HttpRequest request, final HttpContext context)
        throws HttpException, IOException {
    context.setAttribute(ClientContext.TARGET_AUTH_STATE, targetAuthState);
    context.setAttribute(ClientContext.PROXY_AUTH_STATE, proxyAuthState);

    HttpHost target = targetHost;

    final HttpRequest orig = request;
    final RequestWrapper origWrapper = wrapRequest(orig);
    origWrapper.setParams(params);
    final HttpRoute origRoute = determineRoute(target, origWrapper, context);

    virtualHost = (HttpHost) origWrapper.getParams().getParameter(ClientPNames.VIRTUAL_HOST);

    // HTTPCLIENT-1092 - add the port if necessary
    if (virtualHost != null && virtualHost.getPort() == -1) {
        final HttpHost host = (target != null) ? target : origRoute.getTargetHost();
        final int port = host.getPort();
        if (port != -1) {
            virtualHost = new HttpHost(virtualHost.getHostName(), port, virtualHost.getSchemeName());
        }
    }

    RoutedRequest roureq = new RoutedRequest(origWrapper, origRoute);

    boolean reuse = false;
    boolean done = false;
    try {
        HttpResponse response = null;
        while (!done) {
            // In this loop, the RoutedRequest may be replaced by a
            // followup request and route. The request and route passed
            // in the method arguments will be replaced. The original
            // request is still available in 'orig'.
            final RequestWrapper wrapper = roureq.getRequest();
            final HttpRoute route = roureq.getRoute();
            response = null;

            // See if we have a user token bound to the execution context
            Object userToken = context.getAttribute(ClientContext.USER_TOKEN);

            // Allocate connection if needed
            if (managedConn == null) {
                final ClientConnectionRequest connRequest = connManager.requestConnection(route, userToken);
                if (orig instanceof AbortableHttpRequest) {
                    ((AbortableHttpRequest) orig).setConnectionRequest(connRequest);
                }

                final long timeout = HttpClientParams.getConnectionManagerTimeout(params);
                try {
                    managedConn = connRequest.getConnection(timeout, TimeUnit.MILLISECONDS);
                } catch (final InterruptedException interrupted) {
                    Thread.currentThread().interrupt();
                    throw new InterruptedIOException();
                }

                if (HttpConnectionParams.isStaleCheckingEnabled(params)) {
                    // validate connection
                    if (managedConn.isOpen()) {
                        this.log.debug("Stale connection check");
                        if (managedConn.isStale()) {
                            this.log.debug("Stale connection detected");
                            managedConn.close();
                        }
                    }
                }
            }

            if (orig instanceof AbortableHttpRequest) {
                ((AbortableHttpRequest) orig).setReleaseTrigger(managedConn);
            }

            try {
                tryConnect(roureq, context);
            } catch (final TunnelRefusedException ex) {
                if (this.log.isDebugEnabled()) {
                    this.log.debug(ex.getMessage());
                }
                response = ex.getResponse();
                break;
            }

            final String userinfo = wrapper.getURI().getUserInfo();
            if (userinfo != null) {
                targetAuthState.update(new BasicScheme(), new UsernamePasswordCredentials(userinfo));
            }

            // Get target. Even if there's virtual host, we may need the target to set the port.
            if (virtualHost != null) {
                target = virtualHost;
            } else {
                final URI requestURI = wrapper.getURI();
                if (requestURI.isAbsolute()) {
                    target = URIUtils.extractHost(requestURI);
                }
            }
            if (target == null) {
                target = route.getTargetHost();
            }

            // Reset headers on the request wrapper
            wrapper.resetHeaders();

            // Re-write request URI if needed
            rewriteRequestURI(wrapper, route);

            // Populate the execution context
            context.setAttribute(ExecutionContext.HTTP_TARGET_HOST, target);
            context.setAttribute(ClientContext.ROUTE, route);
            context.setAttribute(ExecutionContext.HTTP_CONNECTION, managedConn);

            // Run request protocol interceptors
            requestExec.preProcess(wrapper, httpProcessor, context);

            response = tryExecute(roureq, context);
            if (response == null) {
                // Need to start over
                continue;
            }

            // Run response protocol interceptors
            response.setParams(params);
            requestExec.postProcess(response, httpProcessor, context);

            // The connection is in or can be brought to a re-usable state.
            reuse = reuseStrategy.keepAlive(response, context);
            if (reuse) {
                // Set the idle duration of this connection
                final long duration = keepAliveStrategy.getKeepAliveDuration(response, context);
                if (this.log.isDebugEnabled()) {
                    final String s;
                    if (duration > 0) {
                        s = "for " + duration + " " + TimeUnit.MILLISECONDS;
                    } else {
                        s = "indefinitely";
                    }
                    this.log.debug("Connection can be kept alive " + s);
                }
                managedConn.setIdleDuration(duration, TimeUnit.MILLISECONDS);
            }

            final RoutedRequest followup = handleResponse(roureq, response, context);
            if (followup == null) {
                done = true;
            } else {
                if (reuse) {
                    // Make sure the response body is fully consumed, if present
                    final HttpEntity entity = response.getEntity();
                    EntityUtils.consume(entity);
                    // entity consumed above is not an auto-release entity,
                    // need to mark the connection re-usable explicitly
                    managedConn.markReusable();
                } else {
                    managedConn.close();
                    if (proxyAuthState.getState().compareTo(AuthProtocolState.CHALLENGED) > 0
                            && proxyAuthState.getAuthScheme() != null
                            && proxyAuthState.getAuthScheme().isConnectionBased()) {
                        this.log.debug("Resetting proxy auth state");
                        proxyAuthState.reset();
                    }
                    if (targetAuthState.getState().compareTo(AuthProtocolState.CHALLENGED) > 0
                            && targetAuthState.getAuthScheme() != null
                            && targetAuthState.getAuthScheme().isConnectionBased()) {
                        this.log.debug("Resetting target auth state");
                        targetAuthState.reset();
                    }
                }
                // check if we can use the same connection for the followup
                if (!followup.getRoute().equals(roureq.getRoute())) {
                    releaseConnection();
                }
                roureq = followup;
            }

            if (managedConn != null) {
                if (userToken == null) {
                    userToken = userTokenHandler.getUserToken(context);
                    context.setAttribute(ClientContext.USER_TOKEN, userToken);
                }
                if (userToken != null) {
                    managedConn.setState(userToken);
                }
            }
        } // while not done

        // check for entity, release connection if possible
        if ((response == null) || (response.getEntity() == null) || !response.getEntity().isStreaming()) {
            // connection not needed and (assumed to be) in re-usable state
            if (reuse) {
                managedConn.markReusable();
            }
            releaseConnection();
        } else {
            // install an auto-release entity
            HttpEntity entity = response.getEntity();
            entity = new BasicManagedEntity(entity, managedConn, reuse);
            response.setEntity(entity);
        }

        return response;
    } catch (final ConnectionShutdownException ex) {
        final InterruptedIOException ioex = new InterruptedIOException("Connection has been shut down");
        ioex.initCause(ex);
        throw ioex;
    } catch (final HttpException ex) {
        abortConnection();
        throw ex;
    } catch (final IOException ex) {
        abortConnection();
        throw ex;
    } catch (final RuntimeException ex) {
        abortConnection();
        throw ex;
    }
}
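This method shows two flavors of the conversion: when the wait for a pooled connection is interrupted, it restores the interrupt flag and throws a bare InterruptedIOException with no cause; when a ConnectionShutdownException surfaces (the connection having been shut down out from under the request), that exception is attached as the cause of an InterruptedIOException carrying a detail message instead.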
From source file: org.apache.http2.impl.client.DefaultRequestDirector.java
public HttpResponse execute(HttpHost target, HttpRequest request, HttpContext context)
        throws HttpException, IOException {
    context.setAttribute(ClientContext.TARGET_AUTH_STATE, targetAuthState);
    context.setAttribute(ClientContext.PROXY_AUTH_STATE, proxyAuthState);

    HttpRequest orig = request;
    RequestWrapper origWrapper = wrapRequest(orig);
    origWrapper.setParams(params);
    HttpRoute origRoute = determineRoute(target, origWrapper, context);

    virtualHost = (HttpHost) origWrapper.getParams().getParameter(ClientPNames.VIRTUAL_HOST);

    // HTTPCLIENT-1092 - add the port if necessary
    if (virtualHost != null && virtualHost.getPort() == -1) {
        HttpHost host = (target != null) ? target : origRoute.getTargetHost();
        int port = host.getPort();
        if (port != -1) {
            virtualHost = new HttpHost(virtualHost.getHostName(), port, virtualHost.getSchemeName());
        }
    }

    RoutedRequest roureq = new RoutedRequest(origWrapper, origRoute);

    boolean reuse = false;
    boolean done = false;
    try {
        HttpResponse response = null;
        while (!done) {
            // In this loop, the RoutedRequest may be replaced by a
            // followup request and route. The request and route passed
            // in the method arguments will be replaced. The original
            // request is still available in 'orig'.
            RequestWrapper wrapper = roureq.getRequest();
            HttpRoute route = roureq.getRoute();
            response = null;

            // See if we have a user token bound to the execution context
            Object userToken = context.getAttribute(ClientContext.USER_TOKEN);

            // Allocate connection if needed
            if (managedConn == null) {
                ClientConnectionRequest connRequest = connManager.requestConnection(route, userToken);
                if (orig instanceof AbortableHttpRequest) {
                    ((AbortableHttpRequest) orig).setConnectionRequest(connRequest);
                }

                long timeout = HttpClientParams.getConnectionManagerTimeout(params);
                try {
                    managedConn = connRequest.getConnection(timeout, TimeUnit.MILLISECONDS);
                } catch (InterruptedException interrupted) {
                    InterruptedIOException iox = new InterruptedIOException();
                    iox.initCause(interrupted);
                    throw iox;
                }

                if (HttpConnectionParams.isStaleCheckingEnabled(params)) {
                    // validate connection
                    if (managedConn.isOpen()) {
                        this.log.debug("Stale connection check");
                        if (managedConn.isStale()) {
                            this.log.debug("Stale connection detected");
                            managedConn.close();
                        }
                    }
                }
            }

            if (orig instanceof AbortableHttpRequest) {
                ((AbortableHttpRequest) orig).setReleaseTrigger(managedConn);
            }

            try {
                tryConnect(roureq, context);
            } catch (TunnelRefusedException ex) {
                if (this.log.isDebugEnabled()) {
                    this.log.debug(ex.getMessage());
                }
                response = ex.getResponse();
                break;
            }

            String userinfo = wrapper.getURI().getUserInfo();
            if (userinfo != null) {
                targetAuthState.update(new BasicScheme(), new UsernamePasswordCredentials(userinfo));
            }

            HttpHost proxy = route.getProxyHost();
            if (virtualHost != null) {
                target = virtualHost;
            } else {
                URI requestURI = wrapper.getURI();
                if (requestURI.isAbsolute()) {
                    target = new HttpHost(requestURI.getHost(), requestURI.getPort(), requestURI.getScheme());
                }
            }
            if (target == null) {
                target = route.getTargetHost();
            }

            // Reset headers on the request wrapper
            wrapper.resetHeaders();

            // Re-write request URI if needed
            rewriteRequestURI(wrapper, route);

            // Populate the execution context
            context.setAttribute(ExecutionContext.HTTP_TARGET_HOST, target);
            context.setAttribute(ExecutionContext.HTTP_PROXY_HOST, proxy);
            context.setAttribute(ExecutionContext.HTTP_CONNECTION, managedConn);

            // Run request protocol interceptors
            requestExec.preProcess(wrapper, httpProcessor, context);

            response = tryExecute(roureq, context);
            if (response == null) {
                // Need to start over
                continue;
            }

            // Run response protocol interceptors
            response.setParams(params);
            requestExec.postProcess(response, httpProcessor, context);

            // The connection is in or can be brought to a re-usable state.
            reuse = reuseStrategy.keepAlive(response, context);
            if (reuse) {
                // Set the idle duration of this connection
                long duration = keepAliveStrategy.getKeepAliveDuration(response, context);
                if (this.log.isDebugEnabled()) {
                    String s;
                    if (duration > 0) {
                        s = "for " + duration + " " + TimeUnit.MILLISECONDS;
                    } else {
                        s = "indefinitely";
                    }
                    this.log.debug("Connection can be kept alive " + s);
                }
                managedConn.setIdleDuration(duration, TimeUnit.MILLISECONDS);
            }

            RoutedRequest followup = handleResponse(roureq, response, context);
            if (followup == null) {
                done = true;
            } else {
                if (reuse) {
                    // Make sure the response body is fully consumed, if present
                    HttpEntity entity = response.getEntity();
                    EntityUtils.consume(entity);
                    // entity consumed above is not an auto-release entity,
                    // need to mark the connection re-usable explicitly
                    managedConn.markReusable();
                } else {
                    managedConn.close();
                    if (proxyAuthState.getState().compareTo(AuthProtocolState.CHALLENGED) > 0
                            && proxyAuthState.getAuthScheme() != null
                            && proxyAuthState.getAuthScheme().isConnectionBased()) {
                        this.log.debug("Resetting proxy auth state");
                        proxyAuthState.reset();
                    }
                    if (targetAuthState.getState().compareTo(AuthProtocolState.CHALLENGED) > 0
                            && targetAuthState.getAuthScheme() != null
                            && targetAuthState.getAuthScheme().isConnectionBased()) {
                        this.log.debug("Resetting target auth state");
                        targetAuthState.reset();
                    }
                }
                // check if we can use the same connection for the followup
                if (!followup.getRoute().equals(roureq.getRoute())) {
                    releaseConnection();
                }
                roureq = followup;
            }

            if (managedConn != null) {
                if (userToken == null) {
                    userToken = userTokenHandler.getUserToken(context);
                    context.setAttribute(ClientContext.USER_TOKEN, userToken);
                }
                if (userToken != null) {
                    managedConn.setState(userToken);
                }
            }
        } // while not done

        // check for entity, release connection if possible
        if ((response == null) || (response.getEntity() == null) || !response.getEntity().isStreaming()) {
            // connection not needed and (assumed to be) in re-usable state
            if (reuse)
                managedConn.markReusable();
            releaseConnection();
        } else {
            // install an auto-release entity
            HttpEntity entity = response.getEntity();
            entity = new BasicManagedEntity(entity, managedConn, reuse);
            response.setEntity(entity);
        }
        return response;
    } catch (ConnectionShutdownException ex) {
        InterruptedIOException ioex = new InterruptedIOException("Connection has been shut down");
        ioex.initCause(ex);
        throw ioex;
    } catch (HttpException ex) {
        abortConnection();
        throw ex;
    } catch (IOException ex) {
        abortConnection();
        throw ex;
    } catch (RuntimeException ex) {
        abortConnection();
        throw ex;
    }
}
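Functionally this is the same director loop as the previous example; only the interrupt handling differs in style. The InterruptedIOException is created, initCause is invoked as a separate statement, and the exception is thrown, which avoids the cast used elsewhere; note that this variant does not restore the interrupt flag before throwing.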
From source file: org.apache.hadoop.hbase.wal.WALSplitter.java
/**
 * log splitting implementation, splits one log file.
 * @param logfile should be an actual log file.
 */
boolean splitLogFile(FileStatus logfile, CancelableProgressable reporter) throws IOException {
    Preconditions.checkState(status == null);
    Preconditions.checkArgument(logfile.isFile(),
            "passed in file status is for something other than a regular file.");
    boolean isCorrupted = false;
    boolean skipErrors = conf.getBoolean("hbase.hlog.split.skip.errors", SPLIT_SKIP_ERRORS_DEFAULT);
    int interval = conf.getInt("hbase.splitlog.report.interval.loglines", 1024);
    Path logPath = logfile.getPath();
    boolean outputSinkStarted = false;
    boolean progress_failed = false;
    int editsCount = 0;
    int editsSkipped = 0;
    status = TaskMonitor.get()
            .createStatus("Splitting log file " + logfile.getPath() + " into a temporary staging area.");
    Reader in = null;
    try {
        long logLength = logfile.getLen();
        LOG.info("Splitting wal: " + logPath + ", length=" + logLength);
        LOG.info("DistributedLogReplay = " + this.distributedLogReplay);
        status.setStatus("Opening log file");
        if (reporter != null && !reporter.progress()) {
            progress_failed = true;
            return false;
        }
        try {
            in = getReader(logfile, skipErrors, reporter);
        } catch (CorruptedLogFileException e) {
            LOG.warn("Could not get reader, corrupted log file " + logPath, e);
            ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
            isCorrupted = true;
        }
        if (in == null) {
            LOG.warn("Nothing to split in log file " + logPath);
            return true;
        }
        int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
        int numOpenedFilesLastCheck = 0;
        outputSink.setReporter(reporter);
        outputSink.startWriterThreads();
        outputSinkStarted = true;
        Entry entry;
        Long lastFlushedSequenceId = -1L;
        ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(logPath);
        failedServerName = (serverName == null) ? "" : serverName.getServerName();
        while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
            byte[] region = entry.getKey().getEncodedRegionName();
            String encodedRegionNameAsStr = Bytes.toString(region);
            lastFlushedSequenceId = lastFlushedSequenceIds.get(encodedRegionNameAsStr);
            if (lastFlushedSequenceId == null) {
                if (this.distributedLogReplay) {
                    RegionStoreSequenceIds ids = csm.getSplitLogWorkerCoordination()
                            .getRegionFlushedSequenceId(failedServerName, encodedRegionNameAsStr);
                    if (ids != null) {
                        lastFlushedSequenceId = ids.getLastFlushedSequenceId();
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("DLR Last flushed sequenceid for " + encodedRegionNameAsStr + ": "
                                    + TextFormat.shortDebugString(ids));
                        }
                    }
                } else if (sequenceIdChecker != null) {
                    RegionStoreSequenceIds ids = sequenceIdChecker.getLastSequenceId(region);
                    Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
                    for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) {
                        maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(),
                                storeSeqId.getSequenceId());
                    }
                    regionMaxSeqIdInStores.put(encodedRegionNameAsStr, maxSeqIdInStores);
                    lastFlushedSequenceId = ids.getLastFlushedSequenceId();
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("DLS Last flushed sequenceid for " + encodedRegionNameAsStr + ": "
                                + TextFormat.shortDebugString(ids));
                    }
                }
                if (lastFlushedSequenceId == null) {
                    lastFlushedSequenceId = -1L;
                }
                lastFlushedSequenceIds.put(encodedRegionNameAsStr, lastFlushedSequenceId);
            }
            if (lastFlushedSequenceId >= entry.getKey().getLogSeqNum()) {
                editsSkipped++;
                continue;
            }
            entryBuffers.appendEntry(entry);
            editsCount++;
            int moreWritersFromLastCheck = this.getNumOpenWriters() - numOpenedFilesLastCheck;
            // If sufficient edits have passed, check if we should report progress.
            if (editsCount % interval == 0 || moreWritersFromLastCheck > numOpenedFilesBeforeReporting) {
                numOpenedFilesLastCheck = this.getNumOpenWriters();
                String countsStr = (editsCount - (editsSkipped + outputSink.getSkippedEdits()))
                        + " edits, skipped " + editsSkipped + " edits.";
                status.setStatus("Split " + countsStr);
                if (reporter != null && !reporter.progress()) {
                    progress_failed = true;
                    return false;
                }
            }
        }
    } catch (InterruptedException ie) {
        IOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
    } catch (CorruptedLogFileException e) {
        LOG.warn("Could not parse, corrupted log file " + logPath, e);
        csm.getSplitLogWorkerCoordination().markCorrupted(rootDir, logfile.getPath().getName(), fs);
        isCorrupted = true;
    } catch (IOException e) {
        e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
        throw e;
    } finally {
        LOG.debug("Finishing writing output logs and closing down.");
        try {
            if (null != in) {
                in.close();
            }
        } catch (IOException exception) {
            LOG.warn("Could not close wal reader: " + exception.getMessage());
            LOG.debug("exception details", exception);
        }
        try {
            if (outputSinkStarted) {
                // Set progress_failed to true as the immediate following statement will reset its value
                // when finishWritingAndClose() throws exception, progress_failed has the right value
                progress_failed = true;
                progress_failed = outputSink.finishWritingAndClose() == null;
            }
        } finally {
            String msg = "Processed " + editsCount + " edits across "
                    + outputSink.getNumberOfRecoveredRegions() + " regions; edits skipped=" + editsSkipped
                    + "; log file=" + logPath + ", length=" + logfile.getLen()
                    + // See if length got updated post lease recovery
                    ", corrupted=" + isCorrupted + ", progress failed=" + progress_failed;
            LOG.info(msg);
            status.markComplete(msg);
        }
    }
    return !progress_failed;
}
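The catch of InterruptedException covers blocking calls inside the split loop (such as entryBuffers.appendEntry, which can block while buffer space is reclaimed); converting it to an InterruptedIOException lets splitLogFile keep an IOException-only signature while the finally block still closes the reader and output sink.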
From source file: org.echocat.jomon.net.dns.DnsServer.java
@Nonnull
private Socket accept(@Nonnull ServerSocket sock) throws IOException {
    try {
        return sock.accept();
    } catch (final SocketException e) {
        if (sock.isClosed()) {
            final InterruptedIOException toThrow = new InterruptedIOException();
            toThrow.initCause(e);
            throw toThrow;
        } else {
            throw e;
        }
    }
}
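This pattern translates the SocketException raised when another thread closes the listening socket into an InterruptedIOException, letting callers distinguish a deliberate shutdown from a genuine socket failure; the receive() method below applies the same treatment to a DatagramSocket.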
From source file: org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java
/**
 * @return A Multimap<startkey, LoadQueueItem> that groups LQI by likely
 *   bulk load region targets.
 */
private Multimap<ByteBuffer, LoadQueueItem> groupOrSplitPhase(final HTable table, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
    // <region start key, LQI> need synchronized only within this scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);

    // drain LQIs and figure out bulk load groups
    Set<Future<List<LoadQueueItem>>> splittingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();
        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> splits = groupOrSplit(regionGroups, item, table, startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results. All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<List<LoadQueueItem>> lqis : splittingFutures) {
        try {
            List<LoadQueueItem> splits = lqis.get();
            if (splits != null) {
                queue.addAll(splits);
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
    return regionGroups;
}
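If a worker's IOException surfaces through an ExecutionException, it is unwrapped and rethrown as-is; an InterruptedException while waiting on a Future is converted with the same cast-initCause idiom, so the whole group-or-split phase fails with an IOException subtype either way.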
From source file: org.echocat.jomon.net.dns.DnsServer.java
private static void receive(@Nonnull DatagramSocket sock, @Nonnull DatagramPacket indp) throws IOException {
    try {
        sock.receive(indp);
    } catch (final SocketException e) {
        if (sock.isClosed()) {
            final InterruptedIOException toThrow = new InterruptedIOException();
            toThrow.initCause(e);
            throw toThrow;
        } else {
            throw e;
        }
    }
}
From source file: org.kohsuke.github.GHRepository.java
/**
 * Forks this repository as your repository.
 *
 * @return Newly forked repository that belongs to you.
 */
public GHRepository fork() throws IOException {
    new Requester(root).method("POST").to(getApiTailUrl("forks"), null);
    // this API is asynchronous. we need to wait for a bit
    for (int i = 0; i < 10; i++) {
        GHRepository r = root.getMyself().getRepository(name);
        if (r != null)
            return r;
        try {
            Thread.sleep(3000);
        } catch (InterruptedException e) {
            throw (IOException) new InterruptedIOException().initCause(e);
        }
    }
    throw new IOException(this + " was forked but can't find the new repository");
}
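Here the wrapped exception is cast only to IOException, since that is all fork() declares; at runtime callers still receive an InterruptedIOException whose cause records the original interrupt during the polling sleep.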