List of usage examples for java.util.concurrent.ExecutionException.getCause()
public synchronized Throwable getCause()
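getCause() returns the Throwable that the asynchronous task actually threw (or null if no cause was recorded), so callers typically unwrap it and log or rethrow the original failure instead of the wrapping ExecutionException. Below is a minimal, self-contained sketch of that pattern; the executor setup and the IOException thrown by the task are illustrative assumptions, not taken from any of the projects listed afterwards.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        // Submit a task that fails; the Future records the thrown exception
        Future<String> future = pool.submit((Callable<String>) () -> {
            throw new IOException("simulated task failure");
        });
        try {
            future.get();
        } catch (ExecutionException ee) {
            // getCause() is the exception thrown inside the task
            Throwable cause = ee.getCause();
            if (cause instanceof IOException) {
                System.out.println("Task failed with: " + cause.getMessage());
            }
        } finally {
            pool.shutdownNow();
        }
    }
}

The real-world examples below follow the same shape: call Future.get(), catch ExecutionException, then inspect, cast, wrap, or rethrow getCause().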
From source file:org.apache.hadoop.hbase.regionserver.RegionScannerHolder.java
/**
 * Get the prefetched scan result, if any. Otherwise,
 * do a scan synchronously and return the result, which
 * may take some time. Region scan coprocessor, if specified,
 * is invoked properly, which may override the scan result.
 *
 * @param rows the number of rows to scan, which is preferred
 * not to change among scanner.next() calls.
 *
 * @return scan result, which has the data retrieved from
 * the scanner, or some IOException if the scan failed.
 * @throws IOException if failed to retrieve from the scanner.
 */
public ScanResult getScanResult(final int rows) throws IOException {
    Preconditions.checkArgument(rows > 0, "Number of rows requested must be positive");
    ScanResult scanResult = null;
    this.rows = rows;

    if (prefetchScanFuture == null) {
        // Need to scan inline if not prefetched
        scanResult = prefetcher.call();
    } else {
        // if we have a prefetched result, then use it
        try {
            scanResult = prefetchScanFuture.get();
            if (scanResult.moreResults) {
                int prefetchedRows = scanResult.results.size();
                if (prefetchedRows != 0 && this.rows > prefetchedRows) {
                    // Try to scan more since we haven't prefetched enough
                    this.rows -= prefetchedRows;
                    ScanResult tmp = prefetcher.call();
                    if (tmp.isException) {
                        return tmp; // Keep the prefetched results for later
                    }
                    if (tmp.results != null && !tmp.results.isEmpty()) {
                        // Merge new results to the old result list
                        scanResult.results.addAll(tmp.results);
                    }
                    // Reset rows for next prefetching
                    this.rows = rows;
                }
            }
            prefetchScanFuture = null;
            if (prefetchedResultSize > 0) {
                globalPrefetchedResultSize.addAndGet(-prefetchedResultSize);
                prefetchedResultSize = 0L;
            }
        } catch (ExecutionException ee) {
            throw new IOException("failed to run prefetching task", ee.getCause());
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            IOException iie = new InterruptedIOException("scan was interrupted");
            iie.initCause(ie);
            throw iie;
        }
    }

    if (prefetching && scanResult.moreResults && !scanResult.results.isEmpty()) {
        long totalPrefetchedResultSize = globalPrefetchedResultSize.get();
        if (totalPrefetchedResultSize < maxGlobalPrefetchedResultSize) {
            // Schedule a background prefetch for the next result
            // if prefetch is enabled on scans and there are more results
            prefetchScanFuture = scanPrefetchThreadPool.submit(prefetcher);
        } else if (LOG.isTraceEnabled()) {
            LOG.trace("One prefetching is skipped for scanner " + scannerName
                + " since total prefetched result size " + totalPrefetchedResultSize
                + " is more than the maximum configured " + maxGlobalPrefetchedResultSize);
        }
    }
    return scanResult;
}
From source file:org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker.java
/**
 * Check all the regiondirs in the specified tableDir
 *
 * @param tableDir
 *          path to a table
 * @throws IOException
 */
void checkTableDir(Path tableDir) throws IOException {
    FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
    if (rds.length == 0 && !fs.exists(tableDir)) {
        // interestingly listStatus does not throw an exception if the path does not exist.
        LOG.warn("Table Directory " + tableDir
            + " does not exist. Likely due to concurrent delete. Skipping.");
        missing.add(tableDir);
        return;
    }

    // Parallelize check at the region dir level
    List<RegionDirChecker> rdcs = new ArrayList<RegionDirChecker>();
    List<Future<Void>> rdFutures;

    for (FileStatus rdFs : rds) {
        Path rdDir = rdFs.getPath();
        RegionDirChecker work = new RegionDirChecker(rdDir);
        rdcs.add(work);
    }

    // Submit and wait for completion
    try {
        rdFutures = executor.invokeAll(rdcs);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        LOG.warn("Region dirs checking interrupted!", ie);
        return;
    }

    for (int i = 0; i < rdFutures.size(); i++) {
        Future<Void> f = rdFutures.get(i);
        try {
            f.get();
        } catch (ExecutionException e) {
            LOG.warn("Failed to quarantine an HFile in regiondir " + rdcs.get(i).regionDir, e.getCause());
            // rethrow IOExceptions
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            }
            // rethrow RuntimeExceptions
            if (e.getCause() instanceof RuntimeException) {
                throw (RuntimeException) e.getCause();
            }
            // this should never happen
            LOG.error("Unexpected exception encountered", e);
            return; // bailing out.
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            LOG.warn("Region dirs check interrupted!", ie);
            // bailing out
            return;
        }
    }
}
From source file:org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.java
/**
 * Do the shipping logic
 */
@Override
public boolean replicate(ReplicateContext replicateContext) {
    List<Entry> entries = replicateContext.getEntries();
    String walGroupId = replicateContext.getWalGroupId();
    int sleepMultiplier = 1;

    if (!peersSelected && this.isRunning()) {
        connectToPeers();
        peersSelected = true;
    }

    // minimum of: configured threads, number of 100-waledit batches,
    // and number of current sinks
    int n = Math.min(Math.min(this.maxThreads, entries.size() / 100 + 1),
        replicationSinkMgr.getSinks().size());
    List<List<Entry>> entryLists = new ArrayList<List<Entry>>(n);
    if (n == 1) {
        entryLists.add(entries);
    } else {
        for (int i = 0; i < n; i++) {
            entryLists.add(new ArrayList<Entry>(entries.size() / n + 1));
        }
        // now group by region
        for (Entry e : entries) {
            entryLists.get(Math.abs(Bytes.hashCode(e.getKey().getEncodedRegionName()) % n)).add(e);
        }
    }

    while (this.isRunning()) {
        if (!isPeerEnabled()) {
            if (sleepForRetries("Replication is disabled", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }
        try {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Replicating " + entries.size() + " entries of total size "
                    + replicateContext.getSize());
            }

            List<Future<Integer>> futures = new ArrayList<Future<Integer>>(entryLists.size());
            for (int i = 0; i < entryLists.size(); i++) {
                if (!entryLists.get(i).isEmpty()) {
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Submitting " + entryLists.get(i).size() + " entries of total size "
                            + replicateContext.getSize());
                    }
                    // RuntimeExceptions encountered here bubble up and are handled in ReplicationSource
                    futures.add(exec.submit(new Replicator(entryLists.get(i), i)));
                }
            }

            IOException iox = null;
            for (Future<Integer> f : futures) {
                try {
                    // wait for all futures, remove successful parts
                    // (only the remaining parts will be retried)
                    entryLists.remove(f.get());
                } catch (InterruptedException ie) {
                    iox = new IOException(ie);
                } catch (ExecutionException ee) {
                    // cause must be an IOException
                    iox = (IOException) ee.getCause();
                }
            }
            if (iox != null) {
                // if we had any exceptions, try again
                throw iox;
            }

            // update metrics
            this.metrics.setAgeOfLastShippedOp(
                entries.get(entries.size() - 1).getKey().getWriteTime(), walGroupId);
            return true;

        } catch (IOException ioe) {
            // Didn't ship anything, but must still age the last time we did
            this.metrics.refreshAgeOfLastShippedOp(walGroupId);
            if (ioe instanceof RemoteException) {
                ioe = ((RemoteException) ioe).unwrapRemoteException();
                LOG.warn("Can't replicate because of an error on the remote cluster: ", ioe);
                if (ioe instanceof TableNotFoundException) {
                    if (sleepForRetries("A table is missing in the peer cluster. "
                            + "Replication cannot proceed without losing data.", sleepMultiplier)) {
                        sleepMultiplier++;
                    }
                }
            } else {
                if (ioe instanceof SocketTimeoutException) {
                    // This exception means we waited for more than 60s and nothing
                    // happened, the cluster is alive and calling it right away
                    // even for a test just makes things worse.
                    sleepForRetries("Encountered a SocketTimeoutException. Since the "
                            + "call to the remote cluster timed out, which is usually "
                            + "caused by a machine failure or a massive slowdown",
                        this.socketTimeoutMultiplier);
                } else if (ioe instanceof ConnectException) {
                    LOG.warn("Peer is unavailable, rechecking all sinks: ", ioe);
                    replicationSinkMgr.chooseSinks();
                } else {
                    LOG.warn("Can't replicate because of a local or network error: ", ioe);
                }
            }

            if (sleepForRetries("Since we are unable to replicate", sleepMultiplier)) {
                sleepMultiplier++;
            }
        }
    }
    return false; // in case we exited before replicating
}
From source file:org.apache.avro.ipc.RestRequestor.java
private Object request(String messageName, Object request) throws Exception {
    // Initialize request
    RestRequest rpcRequest = new RestRequest(messageName, request, new RPCContext());
    CallFuture<Object> future = /* only need a Future for two-way messages */
        rpcRequest.getMessage().isOneWay() ? null : new CallFuture<Object>();

    // Send request
    request(rpcRequest, future);

    if (future == null) // the message is one-way, so return immediately
        return null;

    try { // the message is two-way, wait for the result
        return future.get();
    } catch (ExecutionException e) {
        if (e.getCause() instanceof Exception) {
            throw (Exception) e.getCause();
        } else {
            throw new AvroRemoteException(e.getCause());
        }
    }
}
From source file:org.apache.kylin.dict.DictionaryManager.java
public DictionaryInfo getDictionaryInfo(final String resourcePath) throws IOException {
    try {
        DictionaryInfo result = dictCache.get(resourcePath);
        if (result == NONE_INDICATOR) {
            return null;
        } else {
            return result;
        }
    } catch (ExecutionException e) {
        throw new RuntimeException(e.getCause());
    }
}
From source file:org.limewire.mojito.CacheForwardTest.java
@SuppressWarnings("unchecked") public void testGetSecurityToken() throws Exception { MojitoDHT dht1 = null;/* ww w .j av a 2 s . com*/ MojitoDHT dht2 = null; try { // Setup the first instance so that it thinks it's bootstrapping dht1 = MojitoFactory.createDHT(); dht1.bind(2000); dht1.start(); Context context1 = (Context) dht1; UnitTestUtils.setBootstrapping(dht1, true); assertFalse(dht1.isBootstrapped()); assertTrue(context1.isBootstrapping()); // And setup the second instance so that it thinks it's bootstrapped dht2 = MojitoFactory.createDHT(); dht2.bind(3000); dht2.start(); Context context2 = (Context) dht2; UnitTestUtils.setBootstrapped(dht2, true); assertTrue(dht2.isBootstrapped()); assertFalse(context2.isBootstrapping()); // Get the SecurityToken... Class clazz = Class.forName("org.limewire.mojito.manager.StoreProcess$GetSecurityTokenHandler"); Constructor<DHTTask<Result>> con = clazz.getDeclaredConstructor(Context.class, Contact.class); con.setAccessible(true); DHTTask<Result> task = con.newInstance(context2, context1.getLocalNode()); CallableDHTTask<Result> callable = new CallableDHTTask<Result>(task); try { Result result = callable.call(); clazz = Class.forName("org.limewire.mojito.manager.StoreProcess$GetSecurityTokenResult"); Method m = clazz.getDeclaredMethod("getSecurityToken", new Class[0]); m.setAccessible(true); SecurityToken securityToken = (SecurityToken) m.invoke(result, new Object[0]); assertNotNull(securityToken); } catch (ExecutionException err) { assertInstanceof(DHTException.class, err.getCause()); fail("DHT-1 did not return a SecurityToken", err); } } finally { if (dht1 != null) { dht1.close(); } if (dht2 != null) { dht2.close(); } } }
From source file:com.taobao.common.tedis.group.ReliableAsynTedisGroup.java
public void init() {
    if (!inited) {
        try {
            if (this.cm == null) {
                this.cm = new DiamondConfigManager(appName, version);
            }
            this.ospreyManager = new OspreyManager("tedis-" + appName + "-" + version);
            this.ospreyManager.registerProcessor(new OspreyProcessor<ReliableAsynMessage>() {

                @Override
                public Class<ReliableAsynMessage> interest() {
                    return ReliableAsynMessage.class;
                }

                @Override
                public Result process(ReliableAsynMessage message) {
                    Result result = new Result();
                    long time = System.currentTimeMillis();
                    Single single = cm.getRouter().getAtomic(message.getSingleKey());
                    if (single == null) {
                        result.setSuccess(false);
                        result.setErrorMessage("Current atomic is null");
                        return result;
                    }
                    try {
                        message.getMethod().invoke(single.getTedis(), message.getArgs());
                    } catch (Throwable t) {
                        logger.warn("write exception:" + single.getProperties(), t);
                        try {
                            statLog(message.getMethod().getName(), false, time);
                            // Unwrap the nested exception chain produced by the proxy invocation
                            InvocationTargetException ite = (InvocationTargetException) t;
                            UndeclaredThrowableException ute = (UndeclaredThrowableException) ite.getTargetException();
                            if (ute.getUndeclaredThrowable() instanceof TimeoutException) {
                                result.setSuccess(false);
                                result.setErrorMessage("TimeoutException");
                                result.setRuntimeException(ute);
                                return result;
                            } else {
                                ExecutionException ee = (ExecutionException) ute.getUndeclaredThrowable();
                                InvocationTargetException ite_1 = (InvocationTargetException) ee.getCause();
                                TedisException te = (TedisException) ite_1.getTargetException();
                                if (te.getCause() instanceof TedisConnectionException) {
                                    result.setSuccess(false);
                                    result.setErrorMessage("JedisConnectionException");
                                    result.setRuntimeException(te);
                                    return result;
                                }
                            }
                        } catch (Throwable tt) {
                            logger.warn(":", tt);
                        }
                    }
                    return result;
                }
            });
            this.ospreyManager.init();
            tedis = (RedisCommands) Proxy.newProxyInstance(RedisCommands.class.getClassLoader(),
                    new Class[] { RedisCommands.class }, new TedisGroupInvocationHandler());
        } catch (Exception e) {
            throw new TedisException("init failed", e);
        }
        inited = true;
    }
}
From source file:org.apache.bookkeeper.stream.storage.impl.TestStorageContainerStoreImpl.java
private <T> void verifyNotFoundException(CompletableFuture<T> future, Status status)
        throws InterruptedException {
    try {
        future.get();
    } catch (ExecutionException ee) {
        assertTrue(ee.getCause() instanceof StatusRuntimeException);
        StatusRuntimeException sre = (StatusRuntimeException) ee.getCause();
        assertEquals(status, sre.getStatus());
    }
}
From source file:com.yoshio3.modules.AzureADServerAuthModule.java
private AuthenticationResult getAccessTokenFromRefreshToken(String refreshToken, String currentUri)
        throws Throwable {
    AuthenticationContext context;
    AuthenticationResult result;
    ExecutorService service = null;
    try {
        service = Executors.newFixedThreadPool(1);
        context = new AuthenticationContext(authority + tenant + "/", true, service);
        Future<AuthenticationResult> future = context.acquireTokenByRefreshToken(refreshToken,
                new ClientCredential(clientId, secretKey), null, null);
        result = future.get();
    } catch (ExecutionException e) {
        throw e.getCause();
    } finally {
        if (service != null) {
            service.shutdown();
        }
    }
    if (result == null) {
        throw new ServiceUnavailableException("authentication result was null");
    }
    return result;
}
From source file:com.yoshio3.modules.AzureADServerAuthModule.java
private AuthenticationResult getAccessToken(AuthorizationCode authorizationCode, String currentUri)
        throws Throwable {
    String authCode = authorizationCode.getValue();
    ClientCredential credential = new ClientCredential(clientId, secretKey);
    AuthenticationContext context;
    AuthenticationResult result;
    ExecutorService service = null;
    try {
        service = Executors.newFixedThreadPool(1);
        context = new AuthenticationContext(authority + tenant + "/", true, service);
        Future<AuthenticationResult> future = context.acquireTokenByAuthorizationCode(authCode,
                new URI(currentUri), credential, null);
        result = future.get();
    } catch (ExecutionException e) {
        throw e.getCause();
    } finally {
        if (service != null) {
            service.shutdown();
        }
    }
    if (result == null) {
        throw new ServiceUnavailableException("authentication result was null");
    }
    return result;
}