Example usage for java.util.concurrent ExecutionException getCause

Introduction

On this page you can find example usage of java.util.concurrent.ExecutionException.getCause().

Prototype

public synchronized Throwable getCause() 

Document

Returns the cause of this throwable or null if the cause is nonexistent or unknown.
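
The typical pattern is to call Future.get(), catch ExecutionException, and inspect getCause() to recover the exception thrown inside the task. Below is a minimal sketch assuming a hypothetical task submitted to an ExecutorService (not taken from the examples that follow):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        // Hypothetical task that fails; Future.get() wraps whatever it throws
        // in an ExecutionException.
        Callable<String> task = () -> {
            throw new IllegalArgumentException("bad input");
        };
        Future<String> future = pool.submit(task);
        try {
            future.get();
        } catch (ExecutionException e) {
            // getCause() returns the original IllegalArgumentException
            Throwable cause = e.getCause();
            System.out.println("Task failed: " + cause);
        } finally {
            pool.shutdown();
        }
    }
}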

Usage

From source file:org.openspaces.grid.gsm.containers.DefaultContainersSlaEnforcementEndpoint.java

/**
 * removes containers from the futureContainers list if the future is done (container started).
 * @param sla the containers SLA policy
 * @throws FailedToStartNewGridServiceContainersException 
 */
private void cleanFutureContainers(ContainersSlaPolicy sla)
        throws FailedToStartNewGridServiceContainersException {

    FutureGridServiceContainer future;
    while ((future = state.removeNextDoneFutureContainer(pu)) != null) {
        Exception exception = null;

        try {
            GridServiceContainer container = future.get();
            if (container.isDiscovered()) {
                logger.info("Container started successfully " + ContainersSlaUtils.gscToString(container));
            }

        } catch (ExecutionException e) {
            // record expected failure causes; rethrow anything unexpected
            Throwable cause = e.getCause();
            if (cause instanceof TimeoutException || cause instanceof AdminException
                    || cause instanceof InterruptedException) {
                // expected exception
                exception = e;
            } else {
                throw new IllegalStateException("Unexpected Exception when starting a new container.", e);
            }
        } catch (TimeoutException e) {
            exception = e;
        }

        if (exception != null) {
            state.failedFutureContainer(future);
            FailedToStartNewGridServiceContainersException ex = new FailedToStartNewGridServiceContainersException(
                    future.getGridServiceAgent().getMachine(), pu, exception);

            if (sla.isUndeploying()) {
                logger.info("Ignoring failure to start new container since undeploying.", ex);
            } else {
                throw ex;
            }
        }
    }

    cleanFailedFutureContainers();
}

From source file:uk.co.revsys.content.repository.cloud.CloudCacheStore.java

@Override
public void applyModifications(List<? extends Modification> modifications) throws CacheLoaderException {
    List<Future<?>> futures = new LinkedList<Future<?>>();
    asyncCommandFutures.set(futures);

    try {
        super.applyModifications(modifications);
        if (pollFutures) {
            CacheLoaderException exception = null;
            try {
                futures = asyncCommandFutures.get();
                if (log.isTraceEnabled()) {
                    log.tracef("Futures, in order: %s", futures);
                }
                for (Future<?> f : futures) {
                    Object o = f.get();
                    if (log.isTraceEnabled()) {
                        log.tracef("Future %s returned %s", f, o);
                    }
                }
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
            } catch (ExecutionException ee) {
                exception = convertToCacheLoaderException("Caught exception in async process", ee.getCause());
            }
            if (exception != null) {
                throw exception;
            }
        }
    } finally {
        asyncCommandFutures.remove();
    }
}

From source file:org.obiba.mica.dataset.service.HarmonizedDatasetService.java

protected Map<String, List<DatasetVariable>> populateHarmonizedVariablesMap(HarmonizationDataset dataset) {
    Map<String, List<DatasetVariable>> map = Maps.newHashMap();

    if (!dataset.getBaseStudyTables().isEmpty()) {
        Iterable<DatasetVariable> res = dataset.getBaseStudyTables().stream()
                .map(s -> helper.asyncGetDatasetVariables(() -> getDatasetVariables(dataset, s))).map(f -> {
                    try {
                        return f.get();
                    } catch (ExecutionException e) {
                        if (e.getCause() instanceof NoSuchValueTableException) {
                            return Lists.<DatasetVariable>newArrayList(); // ignore (in case the study does not implement this harmonization dataset)
                        }
                        if (e.getCause() instanceof MagmaRuntimeException) {
                            throw new DatasourceNotAvailableException(e.getCause());
                        }

                        throw Throwables.propagate(e.getCause());
                    } catch (InterruptedException ie) {
                        throw Throwables.propagate(ie);
                    }
                }).reduce(Iterables::concat).get();

        for (DatasetVariable variable : res) {
            if (!map.containsKey(variable.getParentId())) {
                map.put(variable.getParentId(), Lists.newArrayList());
            }

            map.get(variable.getParentId()).add(variable);
        }
    }

    return map;
}

From source file:org.limewire.mojito.manager.BootstrapProcess.java

private void checkCollisions(Collection<? extends Contact> collisions) {
    OnewayExchanger<PingResult, ExecutionException> c = new OnewayExchanger<PingResult, ExecutionException>(
            true) {
        @Override
        public synchronized void setValue(PingResult value) {
            if (LOG.isErrorEnabled()) {
                LOG.error(context.getLocalNode() + " collides with " + value.getContact());
            }

            super.setValue(value);
            handleCollision(value);
        }

        @Override
        public synchronized void setException(ExecutionException exception) {
            LOG.info("ExecutionException", exception);
            super.setException(exception);

            Throwable cause = exception.getCause();
            if (cause instanceof DHTTimeoutException) {
                // Ignore, everything is fine! Nobody responded,
                // so we can keep our Node ID, which is good.
                // Continue with finding random Node IDs.
                refreshAllBuckets();

            } else {
                exchanger.setException(exception);
            }
        }
    };

    Contact sender = ContactUtils.createCollisionPingSender(context.getLocalNode());
    PingIterator pinger = new PingIteratorFactory.CollisionPinger(context, sender,
            org.limewire.collection.CollectionUtils.toSet(collisions));

    PingResponseHandler handler = new PingResponseHandler(context, sender, pinger);
    start(handler, c);
}

From source file:org.apache.nifi.processors.standard.ExecuteProcess.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    if (proxyOut == null) {
        proxyOut = new ProxyOutputStream(getLogger());
    }

    final Long batchNanos = context.getProperty(BATCH_DURATION).asTimePeriod(TimeUnit.NANOSECONDS);

    final String command = context.getProperty(COMMAND).getValue();
    final String arguments = context.getProperty(COMMAND_ARGUMENTS).isSet()
            ? context.getProperty(COMMAND_ARGUMENTS).evaluateAttributeExpressions().getValue()
            : null;

    final List<String> commandStrings = createCommandStrings(context, command, arguments);
    final String commandString = StringUtils.join(commandStrings, " ");

    if (longRunningProcess == null || longRunningProcess.isDone()) {
        try {
            longRunningProcess = launchProcess(context, commandStrings, batchNanos, proxyOut);
        } catch (final IOException ioe) {
            getLogger().error("Failed to create process due to {}", new Object[] { ioe });
            context.yield();
            return;
        }
    } else {
        getLogger().info("Read from long running process");
    }

    if (!isScheduled()) {
        getLogger().info("User stopped processor; will terminate process immediately");
        longRunningProcess.cancel(true);
        return;
    }

    // Create a FlowFile that we can write to, set the OutputStream for the FlowFile
    // as the delegate for the ProxyOutputStream, then wait until the process finishes
    // or until the specified amount of time has elapsed
    FlowFile flowFile = session.create();
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream flowFileOut) throws IOException {
            try (final OutputStream out = new BufferedOutputStream(flowFileOut)) {
                proxyOut.setDelegate(out);

                if (batchNanos == null) {
                    // we are not creating batches; wait until process terminates.
                    // NB!!! Maybe get(long timeout, TimeUnit unit) should
                    // be used to avoid waiting forever.
                    try {
                        longRunningProcess.get();
                    } catch (final InterruptedException ie) {
                        // interruption while waiting for the process is ignored here
                    } catch (final ExecutionException ee) {
                        getLogger().error("Process execution failed due to {}", new Object[] { ee.getCause() });
                    }
                } else {
                    // wait the allotted amount of time.
                    try {
                        TimeUnit.NANOSECONDS.sleep(batchNanos);
                    } catch (final InterruptedException ie) {
                        // interruption during the batch wait is ignored here
                    }
                }

                proxyOut.setDelegate(null); // prevent from writing to this stream
            }
        }
    });

    if (flowFile.getSize() == 0L) {
        // If no data was written to the file, remove it
        session.remove(flowFile);
    } else if (failure.get()) {
        // If there was a failure processing the output of the Process, remove the FlowFile
        session.remove(flowFile);
        getLogger().error("Failed to read data from Process, so will not generate FlowFile");
    } else {
        // add command and arguments as attribute
        flowFile = session.putAttribute(flowFile, ATTRIBUTE_COMMAND, command);
        if (arguments != null) {
            flowFile = session.putAttribute(flowFile, ATTRIBUTE_COMMAND_ARGS, arguments);
        }

        // All was good. Generate event and transfer FlowFile.
        session.getProvenanceReporter().create(flowFile, "Created from command: " + commandString);
        getLogger().info("Created {} and routed to success", new Object[] { flowFile });
        session.transfer(flowFile, REL_SUCCESS);
    }

    // Commit the session so that the FlowFile is transferred to the next processor
    session.commit();
}

From source file:herddb.cli.HerdDBCLI.java

private static QueryWithParameters rewriteQuery(String query, TableSpaceMapper mapper, boolean frommysqldump)
        throws ScriptException {
    try {

        List<Object> parameters = new ArrayList<>();

        if (frommysqldump && query.startsWith("INSERT INTO")) {
            // this is faster than CCJSqlParserUtil and will allow the cache to work at "client-side" too
            QueryWithParameters rewriteSimpleInsertStatement = MySqlDumpInsertStatementRewriter
                    .rewriteSimpleInsertStatement(query);
            if (rewriteSimpleInsertStatement != null) {
                query = rewriteSimpleInsertStatement.query;
                parameters.addAll(rewriteSimpleInsertStatement.jdbcParameters);
                String schema = mapper == null ? null
                        : mapper.getTableSpace(rewriteSimpleInsertStatement.tableName);
                return new QueryWithParameters(query, rewriteSimpleInsertStatement.tableName, parameters,
                        schema);
            }
        }

        String _query = query;
        net.sf.jsqlparser.statement.Statement stmt = PARSER_CACHE.get(_query, () -> {
            return CCJSqlParserUtil.parse(_query);
        });
        if (stmt instanceof Insert) {
            boolean somethingdone = false;
            Insert insert = (Insert) stmt;
            ItemsList itemlist = insert.getItemsList();
            if (itemlist instanceof ExpressionList) {
                ExpressionList list = (ExpressionList) itemlist;
                List<Expression> expressions = list.getExpressions();
                for (int i = 0; i < expressions.size(); i++) {
                    Expression e = expressions.get(i);
                    boolean done = false;
                    if (e instanceof StringValue) {
                        StringValue sv = (StringValue) e;
                        parameters.add(sv.getValue());
                        done = true;
                    } else if (e instanceof LongValue) {
                        LongValue sv = (LongValue) e;
                        parameters.add(sv.getValue());
                        done = true;
                    } else if (e instanceof NullValue) {
                        NullValue sv = (NullValue) e;
                        parameters.add(null);
                        done = true;
                    } else if (e instanceof TimestampValue) {
                        TimestampValue sv = (TimestampValue) e;
                        parameters.add(sv.getValue());
                        done = true;
                    } else if (e instanceof DoubleValue) {
                        DoubleValue sv = (DoubleValue) e;
                        parameters.add(sv.getValue());
                        done = true;
                    }
                    if (done) {
                        somethingdone = true;
                        expressions.set(i, new JdbcParameter());
                    }
                }
                if (somethingdone) {
                    StringBuilder queryResult = new StringBuilder();
                    InsertDeParser deparser = new InsertDeParser(new ExpressionDeParser(null, queryResult),
                            null, queryResult);
                    deparser.deParse(insert);
                    query = queryResult.toString();
                }
            } else if (itemlist instanceof MultiExpressionList) {
                MultiExpressionList mlist = (MultiExpressionList) itemlist;
                List<ExpressionList> lists = mlist.getExprList();
                for (ExpressionList list : lists) {
                    List<Expression> expressions = list.getExpressions();
                    for (int i = 0; i < expressions.size(); i++) {
                        Expression e = expressions.get(i);
                        boolean done = false;
                        if (e instanceof StringValue) {
                            StringValue sv = (StringValue) e;
                            parameters.add(sv.getValue());
                            done = true;
                        } else if (e instanceof LongValue) {
                            LongValue sv = (LongValue) e;
                            parameters.add(sv.getValue());
                            done = true;
                        } else if (e instanceof NullValue) {
                            NullValue sv = (NullValue) e;
                            parameters.add(null);
                            done = true;
                        } else if (e instanceof TimestampValue) {
                            TimestampValue sv = (TimestampValue) e;
                            parameters.add(sv.getValue());
                            done = true;
                        } else if (e instanceof DoubleValue) {
                            DoubleValue sv = (DoubleValue) e;
                            parameters.add(sv.getValue());
                            done = true;
                        }
                        if (done) {
                            somethingdone = true;
                            expressions.set(i, new JdbcParameter());
                        }
                    }
                }
                if (somethingdone) {
                    StringBuilder queryResult = new StringBuilder();
                    InsertDeParser deparser = new InsertDeParser(new ExpressionDeParser(null, queryResult),
                            null, queryResult);
                    deparser.deParse(insert);
                    query = queryResult.toString();
                }
            }
            String schema = mapper == null ? null : mapper.getTableSpace(stmt);
            return new QueryWithParameters(query, null, parameters, schema);
        } else {
            String schema = mapper == null ? null : mapper.getTableSpace(stmt);
            return new QueryWithParameters(query, null, Collections.emptyList(), schema);
        }
    } catch (ExecutionException err) {
        System.out.println("error for query: " + query + " -> " + err.getCause());
        return null;
    }
}

From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * This takes the LQIs grouped by likely regions and attempts to bulk load them. Any failures are
 * re-queued for another pass with the groupOrSplitPhase.
 */
protected void bulkLoadPhase(final HTable table, final HConnection conn, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups) throws IOException {
    // atomically bulk load the groups.
    Set<Future<List<LoadQueueItem>>> loadingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
        final byte[] first = e.getKey().array();
        final Collection<LoadQueueItem> lqis = e.getValue();

        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> toRetry = tryAtomicRegionLoad(conn, table.getTableName(), first, lqis);
                return toRetry;
            }
        };
        loadingFutures.add(pool.submit(call));
    }

    // get all the results.
    for (Future<List<LoadQueueItem>> future : loadingFutures) {
        try {
            List<LoadQueueItem> toRetry = future.get();

            // LQIs that are requeued to be regrouped.
            queue.addAll(toRetry);

        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                // At this point something unrecoverable has happened.
                // TODO Implement bulk load recovery
                throw new IOException("BulkLoad encountered an unrecoverable problem", t);
            }
            LOG.error("Unexpected execution exception during bulk load", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during bulk load", e1);
            throw new IllegalStateException(e1);
        }
    }
}

From source file:org.apache.hadoop.yarn.util.TestFSDownload.java

@Test(timeout = 10000)
public void testDownloadBadPublic() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
    FileContext files = FileContext.getLocalFSFileContext(conf);
    final Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));
    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());

    Map<LocalResource, LocalResourceVisibility> rsrcVis = new HashMap<LocalResource, LocalResourceVisibility>();

    Random rand = new Random();
    long sharedSeed = rand.nextLong();
    rand.setSeed(sharedSeed);
    System.out.println("SEED: " + sharedSeed);

    Map<LocalResource, Future<Path>> pending = new HashMap<LocalResource, Future<Path>>();
    ExecutorService exec = Executors.newSingleThreadExecutor();
    LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName());
    int size = 512;
    LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC;
    Path path = new Path(basedir, "test-file");
    LocalResource rsrc = createFile(files, path, size, rand, vis);
    rsrcVis.put(rsrc, vis);
    Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
    destPath = new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet()));
    FSDownload fsd = new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
    pending.put(rsrc, exec.submit(fsd));
    exec.shutdown();
    while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS))
        ;
    Assert.assertTrue(pending.get(rsrc).isDone());

    try {
        for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
            p.getValue().get();
            Assert.fail("We localized a file that is not public.");
        }
    } catch (ExecutionException e) {
        Assert.assertTrue(e.getCause() instanceof IOException);
    }
}

From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * @return A Multimap<startkey, LoadQueueItem> that groups LQI by likely bulk load region targets.
 */
private Multimap<ByteBuffer, LoadQueueItem> groupOrSplitPhase(final HTable table, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
    // <region start key, LQI> needs to be synchronized only within the scope of this
    // phase, because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);

    // drain LQIs and figure out bulk load groups
    Set<Future<List<LoadQueueItem>>> splittingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> splits = groupOrSplit(regionGroups, item, table, startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results. All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<List<LoadQueueItem>> lqis : splittingFutures) {
        try {
            List<LoadQueueItem> splits = lqis.get();
            if (splits != null) {
                queue.addAll(splits);
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized,
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw new IllegalStateException(e1);
        }
    }
    return regionGroups;
}

From source file:org.n52.wps.server.handler.RequestHandler.java

/**
 * Handle a request after its type is determined. The request is scheduled
 * for execution. If the server has enough free resources, the client will
 * be served immediately. If time runs out, the client will be asked to come
 * back later with a reference to the result.
 *
 * @param req The request of the client.
 * @throws ExceptionReport
 */
public void handle() throws ExceptionReport {
    Response resp = null;
    if (req == null) {
        throw new ExceptionReport("Internal Error", "");
    }
    if (req instanceof ExecuteRequest) {
        // cast the request to an ExecuteRequest
        ExecuteRequest execReq = (ExecuteRequest) req;

        execReq.updateStatusAccepted();

        ExceptionReport exceptionReport = null;
        try {
            if (execReq.isStoreResponse()) {
                resp = new ExecuteResponse(execReq);
                InputStream is = resp.getAsStream();
                IOUtils.copy(is, os);
                is.close();
                pool.submit(execReq);
                return;
            }
            try {
                // retrieve status with timeout enabled
                try {
                    resp = pool.submit(execReq).get();
                } catch (ExecutionException ee) {
                    LOGGER.warn("exception while handling ExecuteRequest.");
                    // the computation threw an error
                    // probably the client input is not valid
                    if (ee.getCause() instanceof ExceptionReport) {
                        exceptionReport = (ExceptionReport) ee.getCause();
                    } else {
                        exceptionReport = new ExceptionReport(
                                "An error occurred in the computation: " + ee.getMessage(),
                                ExceptionReport.NO_APPLICABLE_CODE);
                    }
                } catch (InterruptedException ie) {
                    LOGGER.warn("interrupted while handling ExecuteRequest.");
                    // interrupted while waiting in the queue
                    exceptionReport = new ExceptionReport("The computation in the process was interrupted.",
                            ExceptionReport.NO_APPLICABLE_CODE);
                }
            } finally {
                if (exceptionReport != null) {
                    LOGGER.debug("ExceptionReport not null: " + exceptionReport.getMessage());
                    // NOT SURE, if this exceptionReport is also written to the DB, if required... test please!
                    throw exceptionReport;
                }
                // send the result to the outputstream of the client.
                /*   if(((ExecuteRequest) req).isQuickStatus()) {
                      resp = new ExecuteResponse(execReq);
                   }*/
                if (resp == null) {
                    LOGGER.warn("null response handling ExecuteRequest.");
                    throw new ExceptionReport("Problem with handling threads in RequestHandler",
                            ExceptionReport.NO_APPLICABLE_CODE);
                }
                if (!execReq.isStoreResponse()) {
                    InputStream is = resp.getAsStream();
                    IOUtils.copy(is, os);
                    is.close();
                    LOGGER.info("Served ExecuteRequest.");
                }
            }
        } catch (RejectedExecutionException ree) {
            LOGGER.warn("exception handling ExecuteRequest.", ree);
            // server too busy?
            throw new ExceptionReport(
                    "The requested process was rejected. Maybe the server is flooded with requests.",
                    ExceptionReport.SERVER_BUSY);
        } catch (Exception e) {
            LOGGER.error("exception handling ExecuteRequest.", e);
            if (e instanceof ExceptionReport) {
                throw (ExceptionReport) e;
            }
            throw new ExceptionReport("Could not read from response stream.",
                    ExceptionReport.NO_APPLICABLE_CODE);
        }
    } else {
        // for GetCapabilities and DescribeProcess:
        resp = req.call();
        try {
            InputStream is = resp.getAsStream();
            IOUtils.copy(is, os);
            is.close();
        } catch (IOException e) {
            throw new ExceptionReport("Could not read from response stream.",
                    ExceptionReport.NO_APPLICABLE_CODE);
        }

    }
}