Example usage for java.util.concurrent ExecutionException getCause

Introduction

This page collects example usages of the java.util.concurrent.ExecutionException#getCause method, drawn from open source projects.

Prototype

public synchronized Throwable getCause() 

Document

Returns the cause of this throwable or null if the cause is nonexistent or unknown.
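
For orientation, here is a minimal, self-contained sketch of the usual pattern (the class and task names GetCauseDemo and riskyTask are illustrative, not taken from the projects quoted below): Future.get() wraps whatever the task threw in an ExecutionException, and getCause() recovers the original throwable.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        // A hypothetical task that always fails.
        Callable<String> riskyTask = () -> {
            throw new IllegalStateException("task failed");
        };
        Future<String> future = pool.submit(riskyTask);
        try {
            future.get();
        } catch (ExecutionException e) {
            // getCause() returns the IllegalStateException thrown inside
            // the task, not the ExecutionException wrapper itself.
            Throwable cause = e.getCause();
            System.out.println("task failed with: " + cause);
        } finally {
            pool.shutdown();
        }
    }
}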

Usage

From source file:org.apache.hadoop.hive.ql.exec.tez.WorkloadManager.java

public WmTezSession getSession(TezSessionState session, MappingInput input, HiveConf conf,
        final WmContext wmContext) throws Exception {
    WmEvent wmEvent = new WmEvent(WmEvent.EventType.GET);
    // Note: not actually used for pool sessions; verify some things like doAs are not set.
    validateConfig(conf);
    String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID);
    SettableFuture<WmTezSession> future = SettableFuture.create();
    WmTezSession wmSession = checkSessionForReuse(session);
    GetRequest req = new GetRequest(input, queryId, future, wmSession, getRequestVersion.incrementAndGet(),
            wmContext);
    currentLock.lock();
    try {
        current.getRequests.add(req);
        if (req.sessionToReuse != null) {
            // Note: we assume reuse is only possible for the same user and config.
            current.toReuse.put(wmSession, req);
        }
        notifyWmThreadUnderLock();
    } finally {
        currentLock.unlock();
    }
    try {
        WmTezSession sessionState = future.get();
        wmEvent.endEvent(sessionState);
        return sessionState;
    } catch (ExecutionException ex) {
        Throwable realEx = ex.getCause();
        throw realEx instanceof Exception ? (Exception) realEx : ex;
    }
}
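
Note that future.get() here blocks until the workload-manager thread completes the SettableFuture; any failure set on the future surfaces as an ExecutionException, whose cause is rethrown directly when it is an Exception, while anything else (for instance an Error, or a null cause) falls back to rethrowing the wrapper.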

From source file:skewtune.mapreduce.STJobTracker.java

@Override
public JobID splitTask(TaskID taskid, int n) throws IOException, InterruptedException {
    try {
        JobID jobid = fastSplitTask(taskid, n).get();
        if (jobid != null) {
            LOG.info("new splitted job " + jobid);
        }
        return jobid;
    } catch (ExecutionException e) {
        throw new IOException(e.getCause()); // wrap again!
    }
}
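
Wrapping e.getCause() rather than e itself in the IOException keeps the reported failure focused on the real cause instead of the ExecutionException shell, at the cost of dropping the wrapper's stack trace; the IOException(Throwable) constructor used here has existed since Java 6.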

From source file:skewtune.mapreduce.STJobTracker.java

/**
 * Schedule a scan task: scan the input data and collect information.
 * Once the information has been collected, the scheduling algorithm runs
 * and launches a reactive task. This only happens on handover.
 *
 * @param taskid the task whose input should be scanned
 * @param action the reaction context describing how to react
 * @return a future holding the ID of the reactive job
 * @throws IOException
 * @throws InterruptedException
 */
private Future<JobID> scheduleScanTask(TaskID taskid, JobInProgress.ReactionContext action)
        throws IOException, InterruptedException {
    JobInProgress jip = null;
    synchronized (jobs) {
        jip = jobs.get(taskid.getJobID());
    }

    if (jip == null) {
        String msg = "unknown task " + taskid;
        LOG.error(msg);
        throw new IOException(msg);
    }

    try {
        jip.waitUntilReadyToSplit(taskid);
        if (LOG.isDebugEnabled()) {
            LOG.debug("scheduling asynchronous scan task for task " + taskid);
        }
        return this.asyncWorkers.submit(new ScanTask(jip, taskid, action));
    } catch (ExecutionException e) {
        throw new IOException(e.getCause()); // wrap again!
    }
}

From source file:skewtune.mapreduce.STJobTracker.java

private Future<JobID> fastSplitTask(ReactionContext context, boolean speculative)
        throws IOException, InterruptedException {
    JobInProgress jip = context.getJob();
    TaskID taskid = context.getTaskID();

    synchronized (pendingReactiveJob) {
        if (!jip.canSpeculateThis(taskid) || pendingReactiveJob.contains(taskid)
                || jip.hasReactiveJob(taskid)) { // being paranoid.
            LOG.warn("reactive job is already scheduled or running for " + taskid);
            return null;
        }
        pendingReactiveJob.add(taskid);
    }

    // FIXME: split the task using an asynchronous task.
    // Check whether both the job token and the metadata have been loaded.

    try {
        jip.waitUntilReadyToSplit(taskid);
        if (LOG.isDebugEnabled()) {
            LOG.debug("scheduling asynchronous split task for task " + taskid);
        }
        return this.asyncWorkers.submit(new SplitTask(context, speculative));
    } catch (ExecutionException e) {
        throw new IOException(e.getCause()); // wrap again!
    }
}

From source file:com.clxcommunications.xms.ApiConnectionIT.java

@Test
public void canHandle500WhenFetchingBatch() throws Exception {
    String spid = TestUtils.freshServicePlanId();
    BatchId batchId = TestUtils.freshBatchId();

    String path = "/v1/" + spid + "/batches/" + batchId;

    wm.stubFor(get(urlEqualTo(path)).willReturn(aResponse().withStatus(500)
            .withHeader("Content-Type", ContentType.TEXT_PLAIN.toString()).withBody("BAD")));

    ApiConnection conn = ApiConnection.builder().servicePlanId(spid).token("tok")
            .endpoint("http://localhost:" + wm.port()).start();

    /*
     * The exception we'll receive in the callback. Need to store it to
     * verify that it is the same exception as received from #get().
     */
    final AtomicReference<Exception> failException = new AtomicReference<Exception>();

    try {
        /*
         * Used to make sure the callback and the test thread agree on the
         * failException variable.
         */
        final CountDownLatch latch = new CountDownLatch(1);

        FutureCallback<MtBatchSmsResult> testCallback = new TestCallback<MtBatchSmsResult>() {

            @Override
            public void failed(Exception exception) {
                if (!failException.compareAndSet(null, exception)) {
                    fail("failed called multiple times");
                }

                latch.countDown();
            }

        };

        Future<MtBatchSmsResult> future = conn.fetchBatchAsync(batchId, testCallback);

        // Give plenty of time for the callback to be called.
        latch.await();

        future.get();
        fail("unexpected future get success");
    } catch (ExecutionException ee) {
        /*
         * The exception cause should be the same as we received in the
         * callback.
         */
        assertThat(failException.get(), is(theInstance(ee.getCause())));
        assertThat(ee.getCause(), is(instanceOf(UnexpectedResponseException.class)));

        UnexpectedResponseException ure = (UnexpectedResponseException) ee.getCause();

        HttpResponse response = ure.getResponse();
        assertThat(response, notNullValue());
        assertThat(response.getStatusLine().getStatusCode(), is(500));
        assertThat(response.getEntity().getContentType().getValue(), is(ContentType.TEXT_PLAIN.toString()));

        byte[] buf = new byte[100];
        int read;

        InputStream contentStream = null;
        try {
            contentStream = response.getEntity().getContent();
            read = contentStream.read(buf);
        } catch (IOException ioe) {
            throw new AssertionError("unexpected exception: " + ioe.getMessage(), ioe);
        } finally {
            if (contentStream != null) {
                try {
                    contentStream.close();
                } catch (IOException ioe) {
                    throw new AssertionError("unexpected exception: " + ioe.getMessage(), ioe);
                }
            }
        }

        assertThat(read, is(3));
        assertThat(Arrays.copyOf(buf, 3), is(new byte[] { 'B', 'A', 'D' }));
    } finally {
        conn.close();
    }

    verifyGetRequest(path);
}
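
The key assertions here are that the cause carried by the ExecutionException is the very same instance that was delivered to the asynchronous callback, and that it has the expected concrete type; in other words, getCause() exposes the original failure object rather than a copy.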

From source file:com.clxcommunications.xms.ApiConnectionIT.java

@Test
public void canHandle500WhenDeletingGroup() throws Exception {
    String spid = TestUtils.freshServicePlanId();
    GroupId groupId = TestUtils.freshGroupId();

    String path = "/v1/" + spid + "/groups/" + groupId;

    wm.stubFor(delete(urlEqualTo(path)).willReturn(aResponse().withStatus(500)
            .withHeader("Content-Type", ContentType.TEXT_PLAIN.toString()).withBody("BAD")));

    ApiConnection conn = ApiConnection.builder().servicePlanId(spid).token("tok")
            .endpoint("http://localhost:" + wm.port()).start();

    /*
     * The exception we'll receive in the callback. Need to store it to
     * verify that it is the same exception as received from #get().
     */
    final AtomicReference<Exception> failException = new AtomicReference<Exception>();

    try {
        /*
         * Used to make sure the callback and the test thread agree on the
         * failException variable.
         */
        final CountDownLatch latch = new CountDownLatch(1);

        FutureCallback<Void> testCallback = new TestCallback<Void>() {

            @Override
            public void failed(Exception exception) {
                if (!failException.compareAndSet(null, exception)) {
                    fail("failed called multiple times");
                }

                latch.countDown();
            }

        };

        Future<Void> future = conn.deleteGroupAsync(groupId, testCallback);

        // Give plenty of time for the callback to be called.
        latch.await();

        future.get();
        fail("unexpected future get success");
    } catch (ExecutionException ee) {
        /*
         * The exception cause should be the same as we received in the
         * callback.
         */
        assertThat(failException.get(), is(theInstance(ee.getCause())));
        assertThat(ee.getCause(), is(instanceOf(UnexpectedResponseException.class)));

        UnexpectedResponseException ure = (UnexpectedResponseException) ee.getCause();

        HttpResponse response = ure.getResponse();
        assertThat(response, notNullValue());
        assertThat(response.getStatusLine().getStatusCode(), is(500));
        assertThat(response.getEntity().getContentType().getValue(), is(ContentType.TEXT_PLAIN.toString()));

        byte[] buf = new byte[100];
        int read;

        InputStream contentStream = null;
        try {
            contentStream = response.getEntity().getContent();
            read = contentStream.read(buf);
        } catch (IOException ioe) {
            throw new AssertionError("unexpected exception: " + ioe.getMessage(), ioe);
        } finally {
            if (contentStream != null) {
                try {
                    contentStream.close();
                } catch (IOException ioe) {
                    throw new AssertionError("unexpected exception: " + ioe.getMessage(), ioe);
                }
            }
        }

        assertThat(read, is(3));
        assertThat(Arrays.copyOf(buf, 3), is(new byte[] { 'B', 'A', 'D' }));
    } finally {
        conn.close();
    }

    verifyDeleteRequest(path);
}

From source file:hudson.model.Hudson.java

private synchronized void load() throws IOException {
    long startTime = System.currentTimeMillis();
    XmlFile cfg = getConfigFile();
    if (cfg.exists()) {
        // reset some data that may not exist in the disk file
        // so that we can take a proper compensation action later.
        primaryView = null;
        views.clear();
        cfg.unmarshal(this);
    }
    clouds.setOwner(this);

    File projectsDir = new File(root, "jobs");
    if (!projectsDir.isDirectory() && !projectsDir.mkdirs()) {
        if (projectsDir.exists())
            throw new IOException(projectsDir + " is not a directory");
        throw new IOException("Unable to create " + projectsDir
                + "\nPermission issue? Please create this directory manually.");
    }
    File[] subdirs = projectsDir.listFiles(new FileFilter() {
        public boolean accept(File child) {
            return child.isDirectory() && Items.getConfigFile(child).exists();
        }
    });
    items.clear();
    if (PARALLEL_LOAD) {
        // load jobs in parallel for better performance
        LOGGER.info("Loading in " + TWICE_CPU_NUM + " parallel threads");
        List<Future<TopLevelItem>> loaders = new ArrayList<Future<TopLevelItem>>();
        for (final File subdir : subdirs) {
            loaders.add(threadPoolForLoad.submit(new Callable<TopLevelItem>() {
                public TopLevelItem call() throws Exception {
                    Thread t = Thread.currentThread();
                    String name = t.getName();
                    t.setName("Loading " + subdir);
                    try {
                        long start = System.currentTimeMillis();
                        TopLevelItem item = (TopLevelItem) Items.load(Hudson.this, subdir);
                        if (LOG_STARTUP_PERFORMANCE)
                            LOGGER.info("Loaded " + item.getName() + " in "
                                    + (System.currentTimeMillis() - start) + "ms by " + name);
                        return item;
                    } finally {
                        t.setName(name);
                    }
                }
            }));
        }

        for (Future<TopLevelItem> loader : loaders) {
            try {
                TopLevelItem item = loader.get();
                items.put(item.getName(), item);
            } catch (ExecutionException e) {
                LOGGER.log(Level.WARNING, "Failed to load a project", e.getCause());
            } catch (InterruptedException e) {
                e.printStackTrace(); // this is probably not the right thing to do
            }
        }
    } else {
        for (File subdir : subdirs) {
            try {
                long start = System.currentTimeMillis();
                TopLevelItem item = (TopLevelItem) Items.load(this, subdir);
                if (LOG_STARTUP_PERFORMANCE)
                    LOGGER.info(
                            "Loaded " + item.getName() + " in " + (System.currentTimeMillis() - start) + "ms");
                items.put(item.getName(), item);
            } catch (Error e) {
                LOGGER.log(Level.WARNING, "Failed to load " + subdir, e);
            } catch (RuntimeException e) {
                LOGGER.log(Level.WARNING, "Failed to load " + subdir, e);
            } catch (IOException e) {
                LOGGER.log(Level.WARNING, "Failed to load " + subdir, e);
            }
        }
    }
    rebuildDependencyGraph();

    {// recompute label objects
        for (Node slave : slaves)
            slave.getAssignedLabels();
        getAssignedLabels();
    }

    // initialize views by inserting the default view if necessary
    // this is both for clean Hudson and for backward compatibility.
    if (views.size() == 0 || primaryView == null) {
        View v = new AllView(Messages.Hudson_ViewName());
        v.owner = this;
        views.add(0, v);
        primaryView = v.getViewName();
    }

    // read in old data that doesn't have the security field set
    if (authorizationStrategy == null) {
        if (useSecurity == null || !useSecurity)
            authorizationStrategy = AuthorizationStrategy.UNSECURED;
        else
            authorizationStrategy = new LegacyAuthorizationStrategy();
    }
    if (securityRealm == null) {
        if (useSecurity == null || !useSecurity)
            setSecurityRealm(SecurityRealm.NO_AUTHENTICATION);
        else
            setSecurityRealm(new LegacySecurityRealm());
    } else {
        // force the set to proxy
        setSecurityRealm(securityRealm);
    }

    if (useSecurity != null && !useSecurity) {
        // forced reset to the insecure mode.
        // this works as an escape hatch for people who locked themselves out.
        authorizationStrategy = AuthorizationStrategy.UNSECURED;
        setSecurityRealm(SecurityRealm.NO_AUTHENTICATION);
    }

    // Initialize the filter with the crumb issuer
    setCrumbIssuer(crumbIssuer);

    // auto register root actions
    actions.addAll(getExtensionList(RootAction.class));

    LOGGER.info(String.format("Took %s ms to load", System.currentTimeMillis() - startTime));
    if (KILL_AFTER_LOAD)
        System.exit(0);
}
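
When a loader fails, logging e.getCause() rather than e keeps the warning focused on the actual load failure; the ExecutionException wrapper itself carries no information beyond the fact that the job was loaded on the thread pool.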

From source file:org.apache.hadoop.hbase.util.HBaseFsck.java

/**
 * Scan HDFS for all regions, recording their information into
 * regionInfoMap.
 */
public void loadHdfsRegionDirs() throws IOException, InterruptedException {
    Path rootDir = FSUtils.getRootDir(getConf());
    FileSystem fs = rootDir.getFileSystem(getConf());

    // list all tables from HDFS
    List<FileStatus> tableDirs = Lists.newArrayList();

    boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));

    List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
    for (Path path : paths) {
        TableName tableName = FSUtils.getTableName(path);
        if ((!checkMetaOnly && isTableIncluded(tableName)) || tableName.equals(TableName.META_TABLE_NAME)) {
            tableDirs.add(fs.getFileStatus(path));
        }
    }

    // verify that version file exists
    if (!foundVersionFile) {
        errors.reportError(ERROR_CODE.NO_VERSION_FILE, "Version file does not exist in root dir " + rootDir);
        if (shouldFixVersionFile()) {
            LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME + " file.");
            setShouldRerun();
            FSUtils.setVersion(fs, rootDir, getConf().getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
                    getConf().getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
                            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
        }
    }

    // level 1:  <HBASE_DIR>/*
    List<WorkItemHdfsDir> dirs = new ArrayList<WorkItemHdfsDir>(tableDirs.size());
    List<Future<Void>> dirsFutures;

    for (FileStatus tableDir : tableDirs) {
        LOG.debug("Loading region dirs from " + tableDir.getPath());
        dirs.add(new WorkItemHdfsDir(this, fs, errors, tableDir));
    }

    // Invoke and wait for Callables to complete
    dirsFutures = executor.invokeAll(dirs);

    for (Future<Void> f : dirsFutures) {
        try {
            f.get();
        } catch (ExecutionException e) {
            LOG.warn("Could not load region dir ", e.getCause());
        }
    }
}
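
Calling get() on each future and catching ExecutionException per item lets the scan continue past individual failures, logging the underlying cause for each directory that could not be loaded; processRegionServers below applies the same pattern per region server.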

From source file:org.apache.hadoop.hbase.util.HBaseFsck.java

/**
 * Contacts each regionserver and fetches metadata about regions.
 * @param regionServerList - the list of region servers to connect to
 * @throws IOException if a remote or network exception occurs
 */
void processRegionServers(Collection<ServerName> regionServerList) throws IOException, InterruptedException {

    List<WorkItemRegion> workItems = new ArrayList<WorkItemRegion>(regionServerList.size());
    List<Future<Void>> workFutures;

    // loop to contact each region server in parallel
    for (ServerName rsinfo : regionServerList) {
        workItems.add(new WorkItemRegion(this, rsinfo, errors, connection));
    }

    workFutures = executor.invokeAll(workItems);

    for (int i = 0; i < workFutures.size(); i++) {
        WorkItemRegion item = workItems.get(i);
        Future<Void> f = workFutures.get(i);
        try {
            f.get();
        } catch (ExecutionException e) {
            LOG.warn("Could not process regionserver " + item.rsinfo.getHostAndPort(), e.getCause());
        }
    }
}