List of usage examples for java.util.concurrent.ExecutionException.getCause()
public synchronized Throwable getCause()
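Future.get() wraps any exception thrown by the task in an ExecutionException; getCause() returns that original exception (or null if none was recorded). Before the source-file examples below, here is a minimal, self-contained sketch of the pattern; it is not taken from any of the listed projects, and the class name GetCauseExample is illustrative only.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // The task fails; Future.get() will wrap this in an ExecutionException.
        Callable<String> task = () -> {
            throw new IllegalStateException("task failed");
        };
        Future<String> future = executor.submit(task);
        try {
            future.get();
        } catch (ExecutionException e) {
            // getCause() returns the IllegalStateException thrown inside the task.
            Throwable cause = e.getCause();
            System.out.println("Task failed with: " + cause);
        } finally {
            executor.shutdown();
        }
    }
}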
From source file:com.vmware.loginsightapi.LogInsightClientMockTest.java
@Test
public void testAggregateQueryFailedInCallback() {
    List<FieldConstraint> constraints = new ConstraintBuilder().eq("vclap_caseid", "1423244")
            .gt("timestamp", "0").build();
    AggregateQuery aqb = (AggregateQuery) new AggregateQuery().limit(100).setConstraints(constraints);
    testAggregateQueryUrlAndHeaders(aqb);
    doAnswer(new Answer<Future<HttpResponse>>() {
        @Override
        public Future<HttpResponse> answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked")
            FutureCallback<HttpResponse> responseCallback = invocation.getArgumentAt(1, FutureCallback.class);
            responseCallback.failed(new Exception());
            return null;
        }
    }).when(asyncHttpClient).execute(any(HttpUriRequest.class), any(FutureCallback.class));
    try {
        CompletableFuture<AggregateResponse> responseFuture = client.aggregateQuery(aqb.toUrlString());
        responseFuture.get(0, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
        logger.error("Exception raised " + ExceptionUtils.getStackTrace(e));
        Assert.assertTrue(e.getCause() instanceof LogInsightApiException);
        Assert.assertEquals(e.getCause().getMessage(), "Failed message Query");
    } catch (Exception e) {
        Assert.assertTrue(false);
    }
}
From source file:com.vmware.loginsightapi.LogInsightClientMockTest.java
@Test
public void testIngestionCancelled() {
    Message msg1 = new Message("Testing the ingestion");
    msg1.addField("vclap_test_id", "11111");
    IngestionRequest request = new IngestionRequest();
    request.addMessage(msg1);
    testIngestionQueryUrlAndHeaders(request);
    doAnswer(new Answer<Future<HttpResponse>>() {
        @Override
        public Future<HttpResponse> answer(InvocationOnMock invocation) {
            FutureCallback<HttpResponse> responseCallback = invocation.getArgumentAt(1, FutureCallback.class);
            responseCallback.cancelled();
            return null;
        }
    }).when(asyncHttpClient).execute(any(HttpUriRequest.class), any(FutureCallback.class));
    try {
        CompletableFuture<IngestionResponse> responseFuture = client.ingest(request);
        responseFuture.get(0, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
        logger.error("Exception raised " + ExceptionUtils.getStackTrace(e));
        Assert.assertTrue(e.getCause() instanceof LogInsightApiException);
        Assert.assertEquals(e.getCause().getMessage(), "Ingestion cancelled");
    } catch (Exception e) {
        Assert.assertTrue(false);
    }
}
From source file:com.vmware.loginsightapi.LogInsightClientMockTest.java
@Test
public void testIngestionFailedInCallback() {
    Message msg1 = new Message("Testing the ingestion");
    msg1.addField("vclap_test_id", "11111");
    IngestionRequest request = new IngestionRequest();
    request.addMessage(msg1);
    testIngestionQueryUrlAndHeaders(request);
    doAnswer(new Answer<Future<HttpResponse>>() {
        @Override
        public Future<HttpResponse> answer(InvocationOnMock invocation) {
            FutureCallback<HttpResponse> responseCallback = invocation.getArgumentAt(1, FutureCallback.class);
            responseCallback.failed(new Exception());
            return null;
        }
    }).when(asyncHttpClient).execute(any(HttpUriRequest.class), any(FutureCallback.class));
    try {
        CompletableFuture<IngestionResponse> responseFuture = client.ingest(request);
        responseFuture.get(0, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
        logger.error("Exception raised " + ExceptionUtils.getStackTrace(e));
        Assert.assertTrue(e.getCause() instanceof LogInsightApiException);
        Assert.assertEquals(e.getCause().getMessage(), "Ingestion failed");
    } catch (Exception e) {
        Assert.assertTrue(false);
    }
}
From source file:com.vmware.loginsightapi.LogInsightClientMockTest.java
@Test
public void testMessageQueryRuntimeFailure() {
    MessageQuery mqb = getMessageQueryForTest();
    testMessageQueryUrlAndHeaders(mqb);
    HttpResponse response = mock(HttpResponse.class);
    HttpEntity httpEntity = mock(HttpEntity.class);
    when(response.getEntity()).thenReturn(httpEntity);
    doAnswer(new Answer<Future<HttpResponse>>() {
        @Override
        public Future<HttpResponse> answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked")
            FutureCallback<HttpResponse> responseCallback = invocation.getArgumentAt(1, FutureCallback.class);
            responseCallback.completed(response);
            return null;
        }
    }).when(asyncHttpClient).execute(any(HttpUriRequest.class), any(FutureCallback.class));
    try {
        when(httpEntity.getContent()).thenThrow(Exception.class);
        CompletableFuture<MessageQueryResponse> responseFuture = client.messageQuery(mqb.toUrlString());
        MessageQueryResponse messages = responseFuture.get(0, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
        logger.error("Exception raised " + ExceptionUtils.getStackTrace(e));
        Assert.assertTrue(e.getCause() instanceof LogInsightApiException);
        Assert.assertEquals(e.getCause().getMessage(), "Message query failed");
    } catch (Exception e1) {
        Assert.assertTrue(false);
    }
}
From source file:backtype.storm.localizer.Localizer.java
/**
 * This function updates blobs on the supervisor. It uses a separate thread pool and runs
 * asynchronously from the download and delete.
 */
public List<LocalizedResource> updateBlobs(List<LocalResource> localResources, String user)
        throws AuthorizationException, KeyNotFoundException, IOException {
    LocalizedResourceSet lrsrcSet = _userRsrc.get(user);
    ArrayList<LocalizedResource> results = new ArrayList<>();
    ArrayList<Callable<LocalizedResource>> updates = new ArrayList<>();

    if (lrsrcSet == null) {
        // resource set must have been removed
        return results;
    }
    ClientBlobStore blobstore = null;
    try {
        blobstore = getClientBlobStore();
        for (LocalResource localResource : localResources) {
            String key = localResource.getBlobName();
            LocalizedResource lrsrc = lrsrcSet.get(key, localResource.shouldUncompress());
            if (lrsrc == null) {
                LOG.warn("blob requested for update doesn't exist: {}", key);
                continue;
            } else {
                // update it if either the version isn't the latest or if any local blob files are missing
                if (!isLocalizedResourceUpToDate(lrsrc, blobstore) || !isLocalizedResourceDownloaded(lrsrc)) {
                    LOG.debug("updating blob: {}", key);
                    updates.add(new DownloadBlob(this, _conf, key, new File(lrsrc.getFilePath()), user,
                            lrsrc.isUncompressed(), true));
                }
            }
        }
    } finally {
        if (blobstore != null) {
            blobstore.shutdown();
        }
    }
    try {
        List<Future<LocalizedResource>> futures = _updateExecService.invokeAll(updates);
        for (Future<LocalizedResource> futureRsrc : futures) {
            try {
                LocalizedResource lrsrc = futureRsrc.get();
                // put the resource just in case it was removed at same time by the cleaner
                LocalizedResourceSet newSet = new LocalizedResourceSet(user);
                LocalizedResourceSet newlrsrcSet = _userRsrc.putIfAbsent(user, newSet);
                if (newlrsrcSet == null) {
                    newlrsrcSet = newSet;
                }
                newlrsrcSet.updateResource(lrsrc.getKey(), lrsrc, lrsrc.isUncompressed());
                results.add(lrsrc);
            } catch (ExecutionException e) {
                LOG.error("Error updating blob: ", e);
                if (e.getCause() instanceof AuthorizationException) {
                    throw (AuthorizationException) e.getCause();
                }
                if (e.getCause() instanceof KeyNotFoundException) {
                    throw (KeyNotFoundException) e.getCause();
                }
            }
        }
    } catch (RejectedExecutionException re) {
        LOG.error("Error updating blobs : ", re);
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted Exception", ie);
    }
    return results;
}
From source file:com.vmware.loginsightapi.LogInsightClientMockTest.java
@Test
public void testIngestionRuntimeFailure() {
    Message msg1 = new Message("Testing the ingestion");
    msg1.addField("vclap_test_id", "11111");
    IngestionRequest request = new IngestionRequest();
    request.addMessage(msg1);
    testIngestionQueryUrlAndHeaders(request);
    HttpResponse response = mock(HttpResponse.class);
    HttpEntity httpEntity = mock(HttpEntity.class);
    when(response.getEntity()).thenReturn(httpEntity);
    doAnswer(new Answer<Future<HttpResponse>>() {
        @Override
        public Future<HttpResponse> answer(InvocationOnMock invocation) {
            FutureCallback<HttpResponse> responseCallback = invocation.getArgumentAt(1, FutureCallback.class);
            responseCallback.completed(response);
            return null;
        }
    }).when(asyncHttpClient).execute(any(HttpUriRequest.class), any(FutureCallback.class));
    try {
        when(httpEntity.getContent()).thenThrow(Exception.class);
        CompletableFuture<IngestionResponse> responseFuture = client.ingest(request);
        responseFuture.get(0, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
        logger.error("Exception raised " + ExceptionUtils.getStackTrace(e));
        Assert.assertTrue(e.getCause() instanceof LogInsightApiException);
        Assert.assertEquals(e.getCause().getMessage(), "Ingestion failed");
    } catch (Exception e1) {
        Assert.assertTrue(false);
    }
}
From source file:com.mikebl71.android.websms.connector.cabbage.CabbageConnector.java
/**
 * Called to update balance. Updates the subconnectors' balances concurrently.
 */
@Override
protected void doUpdate(final Context context, final Intent intent) {
    final ConnectorSpec cs = this.getSpec(context);
    final int subCount = cs.getSubConnectorCount();
    final SubConnectorSpec[] subs = cs.getSubConnectors();

    final List<Callable<Void>> tasks = new ArrayList<Callable<Void>>(subCount);
    for (SubConnectorSpec sub : subs) {
        final String subId = sub.getID();
        tasks.add(new Callable<Void>() {
            public Void call() throws Exception {
                // clone intent and assign it to this sub connector
                final Intent subIntent = new Intent(intent);
                ConnectorCommand cmd = new ConnectorCommand(subIntent);
                cmd.setSelectedSubConnector(subId);
                cmd.setToIntent(subIntent);
                // update balance for this subconnector
                sendData(context, new ConnectorCommand(subIntent));
                return null;
            }
        });
    }
    try {
        final ExecutorService executor = Executors.newFixedThreadPool(subCount);
        // execute all updates in parallel and wait till all are complete
        final List<Future<Void>> results = executor.invokeAll(tasks);
        executor.shutdownNow();
        // if any of the updates failed then re-throw the first exception
        // (which will then be returned to WebSMS)
        for (int idx = 0; idx < results.size(); idx++) {
            Future<Void> result = results.get(idx);
            try {
                result.get();
            } catch (ExecutionException ex) {
                String subName = subs[idx].getName();
                throw new WebSMSException(
                        subName + ": " + ConnectorSpec.convertErrorMessage(context, ex.getCause()));
            }
        }
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
    }
}
From source file:org.zeroturnaround.exec.ProcessExecutor.java
/**
 * Waits until the process stops, a timeout occurs, or the caller thread gets interrupted.
 * In the latter cases the process gets destroyed as well.
 */
private ProcessResult waitFor(WaitForProcess task) throws IOException, InterruptedException, TimeoutException {
    ProcessResult result;
    if (timeout == null) {
        // Use the current thread
        result = task.call();
    } else {
        // Fork another thread to invoke Process.waitFor()
        ExecutorService service = Executors.newSingleThreadScheduledExecutor();
        try {
            result = service.submit(task).get(timeout, timeoutUnit);
        } catch (ExecutionException e) {
            Throwable c = e.getCause();
            if (c instanceof IOException)
                throw (IOException) c;
            if (c instanceof InterruptedException)
                throw (InterruptedException) c;
            if (c instanceof InvalidExitValueException)
                throw (InvalidExitValueException) c;
            throw new IllegalStateException("Error occurred while waiting for process to finish:", c);
        } catch (TimeoutException e) {
            log.debug("{} is running too long", task);
            throw e;
        } finally {
            // Interrupt the task if it's still running and release the ExecutorService's resources
            service.shutdownNow();
        }
    }
    return result;
}
From source file:com.microsoft.azure.AzureVMManagementServiceDelegate.java
/**
 * Terminates a virtual machine.
 *
 * @param config Azure configuration
 * @param vmName VM name
 * @param resourceGroupName Resource group containing the VM
 * @throws Exception
 */
public static void terminateVirtualMachine(final Configuration config, final String vmName,
        final String resourceGroupName) throws Exception {
    try {
        try {
            if (virtualMachineExists(config, vmName, resourceGroupName)) {
                final ComputeManagementClient client = ServiceDelegateHelper.getComputeManagementClient(config);

                List<URI> diskUrisToRemove = new ArrayList<URI>();
                StorageProfile storageProfile = client.getVirtualMachinesOperations()
                        .get(resourceGroupName, vmName).getVirtualMachine().getStorageProfile();
                // Remove the OS disks
                diskUrisToRemove.add(new URI(storageProfile.getOSDisk().getVirtualHardDisk().getUri()));
                // TODO: Remove data disks or add option to do so?

                // Remove the VM
                LOGGER.log(Level.INFO,
                        "AzureVMManagementServiceDelegate: terminateVirtualMachine: Removing virtual machine {0}",
                        vmName);
                client.getVirtualMachinesOperations().delete(resourceGroupName, vmName);

                // Now remove the disks
                for (URI diskUri : diskUrisToRemove) {
                    // Obtain container, storage account, and blob name
                    String storageAccountName = diskUri.getHost().split("\\.")[0];
                    String containerName = PathUtility.getContainerNameFromUri(diskUri, false);
                    String blobName = PathUtility.getBlobNameFromURI(diskUri, false);

                    LOGGER.log(Level.INFO,
                            "AzureVMManagementServiceDelegate: terminateVirtualMachine: Removing disk blob {0}, in container {1} of storage account {2}",
                            new Object[] { blobName, containerName, storageAccountName });
                    final StorageManagementClient storageClient = ServiceDelegateHelper
                            .getStorageManagementClient(config);
                    StorageAccountKeys storageKeys = storageClient.getStorageAccountsOperations()
                            .listKeys(resourceGroupName, storageAccountName).getStorageAccountKeys();
                    URI blobURI = storageClient.getStorageAccountsOperations()
                            .getProperties(resourceGroupName, storageAccountName).getStorageAccount()
                            .getPrimaryEndpoints().getBlob();
                    CloudBlobContainer container = StorageServiceDelegate.getBlobContainerReference(
                            storageAccountName, storageKeys.getKey1(), blobURI.toString(), containerName);
                    container.getBlockBlobReference(blobName).deleteIfExists();
                }

                // Also remove the init script (if it exists)
            }
        } catch (ExecutionException ee) {
            LOGGER.log(Level.INFO,
                    "AzureVMManagementServiceDelegate: terminateVirtualMachine: while deleting VM", ee);

            if (!(ee.getCause() instanceof IllegalArgumentException)) {
                throw ee;
            }

            // assume VM is no longer available
        } catch (ServiceException se) {
            LOGGER.log(Level.INFO,
                    "AzureVMManagementServiceDelegate: terminateVirtualMachine: while deleting VM", se);

            // Check if VM is already deleted: if VM is already deleted then just ignore exception.
            if (!Constants.ERROR_CODE_RESOURCE_NF.equalsIgnoreCase(se.getError().getCode())) {
                throw se;
            }
        } finally {
            LOGGER.log(Level.INFO, "Clean operation starting for {0} NIC and IP", vmName);
            ExecutionEngine.executeAsync(new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    removeIPName(config, resourceGroupName, vmName);
                    return null;
                }
            }, new NoRetryStrategy());
        }
    } catch (UnrecoverableCloudException uce) {
        LOGGER.log(Level.INFO,
                "AzureVMManagementServiceDelegate: terminateVirtualMachine: unrecoverable exception deleting VM",
                uce);
    }
}
From source file:org.apache.hadoop.hbase.regionserver.HFileReadWriteTest.java
public boolean runRandomReadWorkload() throws IOException {
    if (inputFileNames.size() != 1) {
        throw new IOException("Need exactly one input file for random reads: " + inputFileNames);
    }

    Path inputPath = new Path(inputFileNames.get(0));

    // Make sure we are using caching.
    StoreFile storeFile = openStoreFile(inputPath, true);

    StoreFile.Reader reader = storeFile.createReader();

    LOG.info("First key: " + Bytes.toStringBinary(reader.getFirstKey()));
    LOG.info("Last key: " + Bytes.toStringBinary(reader.getLastKey()));

    KeyValue firstKV = KeyValue.createKeyValueFromKey(reader.getFirstKey());
    firstRow = firstKV.getRow();

    KeyValue lastKV = KeyValue.createKeyValueFromKey(reader.getLastKey());
    lastRow = lastKV.getRow();

    byte[] family = firstKV.getFamily();

    if (!Bytes.equals(family, lastKV.getFamily())) {
        LOG.error("First and last key have different families: " + Bytes.toStringBinary(family) + " and "
                + Bytes.toStringBinary(lastKV.getFamily()));
        return false;
    }

    if (Bytes.equals(firstRow, lastRow)) {
        LOG.error("First and last row are the same, cannot run read workload: " + "firstRow="
                + Bytes.toStringBinary(firstRow) + ", " + "lastRow=" + Bytes.toStringBinary(lastRow));
        return false;
    }

    ExecutorService exec = Executors.newFixedThreadPool(numReadThreads + 1);
    int numCompleted = 0;
    int numFailed = 0;
    try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);
        endTime = System.currentTimeMillis() + 1000 * durationSec;
        boolean pread = true;
        for (int i = 0; i < numReadThreads; ++i)
            ecs.submit(new RandomReader(i, reader, pread));
        ecs.submit(new StatisticsPrinter());
        Future<Boolean> result;
        while (true) {
            try {
                result = ecs.poll(endTime + 1000 - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
                if (result == null)
                    break;
                try {
                    if (result.get()) {
                        ++numCompleted;
                    } else {
                        ++numFailed;
                    }
                } catch (ExecutionException e) {
                    LOG.error("Worker thread failure", e.getCause());
                    ++numFailed;
                }
            } catch (InterruptedException ex) {
                LOG.error("Interrupted after " + numCompleted + " workers completed");
                Thread.currentThread().interrupt();
                continue;
            }
        }
    } finally {
        storeFile.closeReader(true);
        exec.shutdown();

        BlockCache c = cacheConf.getBlockCache();
        if (c != null) {
            c.shutdown();
        }
    }
    LOG.info("Worker threads completed: " + numCompleted);
    LOG.info("Worker threads failed: " + numFailed);
    return true;
}