List of usage examples for java.util.concurrent.ExecutionException.getCause()

public synchronized Throwable getCause()

Returns the cause of this exception: the exception thrown by the computation, or null if the cause is nonexistent or unknown. ExecutionException does not override getCause(); the method (and the signature above) is inherited from java.lang.Throwable.
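Before the project examples below, here is a minimal, self-contained sketch of the basic pattern: Future.get() wraps any exception thrown inside the task in an ExecutionException, and getCause() recovers the original failure. All names here (GetCauseDemo, the "boom" message) are illustrative, not taken from the examples that follow.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // The exception thrown inside the task becomes the cause of the ExecutionException.
        Future<String> future = executor.submit((Callable<String>) () -> {
            throw new IllegalStateException("boom");
        });

        try {
            future.get();
        } catch (ExecutionException e) {
            // getCause() returns the IllegalStateException thrown inside the task.
            Throwable cause = e.getCause();
            System.out.println("Task failed with: " + cause);
        } finally {
            executor.shutdown();
        }
    }
}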
From source file:org.apache.pulsar.common.naming.TopicName.java
public static TopicName get(String topic) {
    try {
        return cache.get(topic);
    } catch (ExecutionException e) {
        throw (RuntimeException) e.getCause();
    } catch (UncheckedExecutionException e) {
        throw (RuntimeException) e.getCause();
    }
}
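The unchecked cast to RuntimeException is only safe because the loader behind this cache throws nothing but unchecked exceptions. Guava's LoadingCache.get() wraps checked loader exceptions in ExecutionException and unchecked ones in UncheckedExecutionException, which is why both are caught here. Below is a minimal sketch of the kind of cache that makes the cast safe; the loader body and the TopicName constructor are hypothetical, not Pulsar's actual code.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

// Hypothetical cache: the loader validates input and throws only unchecked
// exceptions, so any cause unwrapped from ExecutionException or
// UncheckedExecutionException is guaranteed to be a RuntimeException.
private static final LoadingCache<String, TopicName> cache = CacheBuilder.newBuilder()
        .maximumSize(100_000)
        .build(new CacheLoader<String, TopicName>() {
            @Override
            public TopicName load(String name) {
                if (name.isEmpty()) {
                    throw new IllegalArgumentException("Topic name cannot be empty");
                }
                return new TopicName(name); // hypothetical constructor
            }
        });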
From source file:org.apache.hadoop.fs.s3a.S3AUtils.java
/**
 * Extract an exception from a failed future, and convert to an IOE.
 *
 * @param operation operation which failed
 * @param path path operated on (may be null)
 * @param ee execution exception
 * @return an IOE which can be thrown
 */
public static IOException extractException(String operation, String path, ExecutionException ee) {
    IOException ioe;
    Throwable cause = ee.getCause();
    if (cause instanceof AmazonClientException) {
        ioe = translateException(operation, path, (AmazonClientException) cause);
    } else if (cause instanceof IOException) {
        ioe = (IOException) cause;
    } else {
        ioe = new IOException(operation + " failed: " + cause, cause);
    }
    return ioe;
}
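A hypothetical call site for this helper (the future and path variables are illustrative, not actual S3A code). Because extractException returns the IOException rather than throwing it, the caller can rethrow in a single statement, keeping every code path visibly terminated:

// Sketch of a call site, not actual S3A code.
try {
    return future.get();
} catch (ExecutionException e) {
    throw extractException("read", path.toString(), e);
}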
From source file:net.lldp.checksims.util.threading.ParallelAlgorithm.java
/**
 * Internal backend: Execute given tasks on a new thread pool.
 *
 * Expects Callable tasks, with non-void returns. If the need for void returning functions emerges, might need
 * another version of this?
 *
 * @param tasks Tasks to execute
 * @param <T> Type returned by the tasks
 * @return Collection of Ts
 */
private static <T, T2 extends Callable<T>> Collection<T> executeTasks(Collection<T2> tasks, StatusLogger logger)
        throws ChecksimsException {
    checkNotNull(tasks);

    if (tasks.size() == 0) {
        logs.warn("Parallel execution called with no tasks - no work done!");
        return new ArrayList<>();
    }

    if (executor.isShutdown()) {
        throw new ChecksimsException("Attempted to call executeTasks while executor was shut down!");
    }

    logs.info("Starting work using " + threadCount + " threads.");

    // Invoke the executor on all the worker instances
    try {
        // Create a monitoring thread to show progress
        MonitorThread monitor = new MonitorThread(executor, logger);
        Thread monitorThread = new Thread(monitor);
        monitorThread.start();

        List<Future<T>> results = executor.invokeAll(tasks);

        // Stop the monitor
        monitor.shutDown();

        // Unpack the futures
        ArrayList<T> unpackInto = new ArrayList<>();
        for (Future<T> future : results) {
            try {
                unpackInto.add(future.get());
            } catch (ExecutionException e) {
                executor.shutdownNow();
                logs.error("Fatal error in executed job!");
                throw new ChecksimsException("Error while executing worker for future", e.getCause());
            }
        }

        return unpackInto;
    } catch (InterruptedException e) {
        executor.shutdownNow();
        throw new ChecksimsException("Execution of Checksims was interrupted!", e);
    } catch (RejectedExecutionException e) {
        executor.shutdownNow();
        throw new ChecksimsException("Could not schedule execution of all tasks!", e);
    }
}
From source file:org.apache.hadoop.hbase.client.RpcRetryingCallerWithReadReplicas.java
/**
 * Extract the real exception from the ExecutionException, and throws what makes more
 * sense.
 */
static void throwEnrichedException(ExecutionException e, int retries)
        throws RetriesExhaustedException, DoNotRetryIOException {
    Throwable t = e.getCause();
    assert t != null; // That's what ExecutionException is about: holding an exception

    if (t instanceof RetriesExhaustedException) {
        throw (RetriesExhaustedException) t;
    }

    if (t instanceof DoNotRetryIOException) {
        throw (DoNotRetryIOException) t;
    }

    RetriesExhaustedException.ThrowableWithExtraContext qt =
            new RetriesExhaustedException.ThrowableWithExtraContext(t, EnvironmentEdgeManager.currentTime(), null);

    List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions = Collections.singletonList(qt);

    throw new RetriesExhaustedException(retries, exceptions);
}
From source file:edu.wpi.checksims.util.threading.ParallelAlgorithm.java
/**
 * Internal backend: Execute given tasks on a new thread pool.
 *
 * Expects Callable tasks, with non-void returns. If the need for void returning functions emerges, might need
 * another version of this?
 *
 * @param tasks Tasks to execute
 * @param <T> Type returned by the tasks
 * @return Collection of Ts
 */
private static <T, T2 extends Callable<T>> Collection<T> executeTasks(Collection<T2> tasks) {
    checkNotNull(tasks);

    if (tasks.size() == 0) {
        logs.warn("Parallel execution called with no tasks - no work done!");
        return new ArrayList<>();
    }

    if (executor.isShutdown()) {
        throw new RuntimeException("Attempted to call executeTasks while executor was shut down!");
    }

    logs.info("Starting work using " + threadCount + " threads.");

    // Invoke the executor on all the worker instances
    try {
        // Create a monitoring thread to show progress
        MonitorThread monitor = new MonitorThread(executor);
        Thread monitorThread = new Thread(monitor);
        monitorThread.start();

        List<Future<T>> results = executor.invokeAll(tasks);

        // Stop the monitor
        monitor.shutDown();

        // Unpack the futures
        ArrayList<T> unpackInto = new ArrayList<>();
        for (Future<T> future : results) {
            try {
                unpackInto.add(future.get());
            } catch (ExecutionException e) {
                executor.shutdownNow();
                logs.error("Fatal error in executed job!");
                throw new RuntimeException("Error while executing worker for future", e.getCause());
            }
        }

        return unpackInto;
    } catch (InterruptedException e) {
        executor.shutdownNow();
        logs.error("Execution of Checksims was interrupted!");
        throw new RuntimeException(e);
    } catch (RejectedExecutionException e) {
        executor.shutdownNow();
        logs.error("Could not schedule execution of all comparisons --- possibly too few resources available?");
        throw new RuntimeException(e);
    }
}
From source file:org.apache.hadoop.yarn.util.FSDownload.java
/**
 * Obtains the file status, first by checking the stat cache if it is
 * available, and then by getting it explicitly from the filesystem. If we got
 * the file status from the filesystem, it is added to the stat cache.
 *
 * The stat cache is expected to be managed by callers who provided it to
 * FSDownload.
 */
private static FileStatus getFileStatus(final FileSystem fs, final Path path,
        LoadingCache<Path, Future<FileStatus>> statCache) throws IOException {
    // if the stat cache does not exist, simply query the filesystem
    if (statCache == null) {
        return fs.getFileStatus(path);
    }

    try {
        // get or load it from the cache
        return statCache.get(path).get();
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        // the underlying exception should normally be IOException
        if (cause instanceof IOException) {
            throw (IOException) cause;
        } else {
            throw new IOException(cause);
        }
    } catch (InterruptedException e) {
        // should not happen
        Thread.currentThread().interrupt();
        throw new IOException(e);
    }
}
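The unwrap-or-wrap idiom in the catch block above generalizes well: rethrow the cause directly when it is already the checked type the method declares, and wrap it otherwise, so callers always see an IOException. Here is a minimal sketch of it as a reusable helper; the class and method names are ours, not Hadoop's.

import java.io.IOException;
import java.util.concurrent.ExecutionException;

final class Causes {
    private Causes() {
    }

    /** Convert an ExecutionException's cause to an IOException, wrapping only if necessary. */
    static IOException toIOException(ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
            return (IOException) cause; // preserve the original type and stack trace
        }
        return new IOException(cause);
    }
}

// Usage at a call site: try { ... } catch (ExecutionException e) { throw Causes.toIOException(e); }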
From source file:org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.java
private static boolean deleteTreeLevel(final FSNamesystem fsn, final String subtreeRootPath,
        final long subTreeRootID, final AbstractFileTree.FileTree fileTree, int level)
        throws TransactionContextException, IOException {
    ArrayList<Future> barrier = new ArrayList<>();

    for (final ProjectedINode dir : fileTree.getDirsByLevel(level)) {
        if (fileTree.countChildren(dir.getId()) <= BIGGEST_DELETABLE_DIR) {
            final String path = fileTree.createAbsolutePath(subtreeRootPath, dir);
            Future f = multiTransactionDeleteInternal(fsn, path, subTreeRootID);
            barrier.add(f);
        } else {
            // delete the content of the directory one by one.
            for (final ProjectedINode inode : fileTree.getChildren(dir.getId())) {
                if (!inode.isDirectory()) {
                    final String path = fileTree.createAbsolutePath(subtreeRootPath, inode);
                    Future f = multiTransactionDeleteInternal(fsn, path, subTreeRootID);
                    barrier.add(f);
                }
            }
            // the dir is empty now. delete it.
            final String path = fileTree.createAbsolutePath(subtreeRootPath, dir);
            Future f = multiTransactionDeleteInternal(fsn, path, subTreeRootID);
            barrier.add(f);
        }
    }

    boolean result = true;
    for (Future f : barrier) {
        try {
            if (!((Boolean) f.get())) {
                result = false;
            }
        } catch (ExecutionException e) {
            result = false;
            LOG.error("Exception was thrown during partial delete", e);
            Throwable throwable = e.getCause();
            if (throwable instanceof IOException) {
                throw (IOException) throwable; // pass the exception as-is to the client
            } else {
                throw new IOException(e); // only IOExceptions are passed to clients
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    return result;
}
From source file:org.onosproject.bmv2.ctl.Bmv2ThriftClient.java
/**
 * Returns a client object to control the passed device.
 *
 * @param deviceId device id
 * @return bmv2 client object
 * @throws Bmv2RuntimeException if a connection to the device cannot be established
 */
public static Bmv2ThriftClient of(DeviceId deviceId) throws Bmv2RuntimeException {
    try {
        checkNotNull(deviceId, "deviceId cannot be null");
        LOG.debug("Getting a client from cache... > deviceId{}", deviceId);
        return CLIENT_CACHE.get(deviceId);
    } catch (ExecutionException e) {
        LOG.debug("Exception while getting a client from cache: {} > ", e, deviceId);
        throw new Bmv2RuntimeException(e.getMessage(), e.getCause());
    }
}
From source file:com.leclercb.taskunifier.gui.main.Main.java
private static boolean loadApiPlugins() {
    API_PLUGINS = new PluginLoader<SynchronizerGuiPlugin>(SynchronizerGuiPlugin.class);
    API_PLUGINS.addPlugin(null, DummyGuiPlugin.getInstance());

    File pluginsFolder = new File(getPluginsFolder());
    boolean outdatedPlugins = false;
    File[] pluginFiles = pluginsFolder.listFiles();

    for (File file : pluginFiles) {
        try {
            ProcessLoadPlugin process = new ProcessLoadPlugin(file);
            Worker<SynchronizerGuiPlugin> worker = new Worker<SynchronizerGuiPlugin>(process);
            worker.setSilent(true);
            worker.execute();

            try {
                worker.get();
            } catch (ExecutionException e) {
                if (e.getCause() instanceof PluginException)
                    throw e.getCause();
            }
        } catch (PluginException e) {
            switch (e.getType()) {
            case MORE_THAN_ONE_PLUGIN:
            case NO_VALID_PLUGIN:
            case OUTDATED_PLUGIN:
                outdatedPlugins = true;
                break;
            default:
                outdatedPlugins = false;
                break;
            }

            GuiLogger.getLogger().warning(e.getMessage());
        } catch (Throwable t) {
            GuiLogger.getLogger().log(Level.WARNING, "Error while loading plugin", t);
        }
    }

    API_PLUGINS.addListChangeListener(new ListChangeListener() {
        @Override
        public void listChange(ListChangeEvent evt) {
            SynchronizerGuiPlugin plugin = (SynchronizerGuiPlugin) evt.getValue();

            if (evt.getChangeType() == ListChangeEvent.VALUE_REMOVED) {
                if (EqualsUtils.equals(
                        Main.getUserSettings().getStringProperty("plugin.synchronizer.id"),
                        plugin.getId()))
                    SynchronizerUtils.setSynchronizerPlugin(DummyGuiPlugin.getInstance());
            }
        }
    });

    SynchronizerUtils.setSynchronizerPlugin(SynchronizerUtils.getSynchronizerPlugin());

    return outdatedPlugins;
}
From source file:com.mirth.connect.server.util.javascript.JavaScriptUtil.java
public static <T> T execute(JavaScriptTask<T> task) throws JavaScriptExecutorException, InterruptedException {
    Future<T> future = executor.submit(task);

    try {
        return future.get();
    } catch (ExecutionException e) {
        throw new JavaScriptExecutorException(e.getCause());
    } catch (InterruptedException e) {
        // synchronize with JavaScriptTask.executeScript() so that it will not initialize
        // the context while we are halting the task
        synchronized (task) {
            future.cancel(true);

            Context context = task.getContext();

            if (context != null && context instanceof MirthContext) {
                ((MirthContext) context).setRunning(false);
            }
        }

        // TODO wait for the task thread to complete before exiting?
        Thread.currentThread().interrupt();
        throw e;
    }
}
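Note how this example separates the two failure modes. ExecutionException means the script itself failed, so only the cause is unwrapped and rethrown inside the domain exception. InterruptedException means the caller wants to stop: the future is cancelled, the task's Context is marked as no longer running, the interrupt flag is restored with Thread.currentThread().interrupt(), and the original exception is rethrown so the caller still sees the interruption.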