List of usage examples for java.util.concurrent.ExecutionException.getMessage()
public String getMessage()
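The examples below come from real projects. As a quick orientation first: Future.get() wraps any exception thrown by the task in an ExecutionException, so getMessage() on the wrapper typically repeats the cause's toString(), while the underlying failure is reached through getCause(). The following minimal, self-contained sketch illustrates that pattern; the executor setup and the failing task are illustrative, not taken from any project below.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetMessageDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(new Callable<String>() {
            @Override
            public String call() {
                // The task fails; Future.get() will wrap this in an ExecutionException.
                throw new IllegalStateException("backend unavailable");
            }
        });
        try {
            future.get();
        } catch (ExecutionException e) {
            // getMessage() on the wrapper usually repeats the cause's toString(),
            // e.g. "java.lang.IllegalStateException: backend unavailable".
            System.out.println("wrapper message: " + e.getMessage());
            // The original failure is available as the cause.
            System.out.println("cause message:   " + e.getCause().getMessage());
        } finally {
            executor.shutdown();
        }
    }
}

Running it prints the wrapper message (prefixed with the cause's class name) followed by the bare cause message, which is why several examples below prefer e.getCause().getMessage() for user-facing errors.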
From source file:hudson.plugins.selenium.PluginImpl.java
public static void startSeleniumNode(Computer c, TaskListener listener, String conf)
        throws IOException, InterruptedException {
    LOGGER.fine("Examining if we need to start Selenium Grid Node");

    final PluginImpl p = Jenkins.getInstance().getPlugin(PluginImpl.class);
    final String exclusions = p.getExclusionPatterns();
    List<String> exclusionPatterns = new ArrayList<String>();
    if (StringUtils.hasText(exclusions)) {
        exclusionPatterns = Arrays.asList(exclusions.split(SEPARATOR));
    }
    if (exclusionPatterns.size() > 0) {
        // loop over all the labels and check if we need to exclude a node
        // based on the exclusionPatterns
        for (Label label : c.getNode().getAssignedLabels()) {
            for (String pattern : exclusionPatterns) {
                if (label.toString().matches(pattern)) {
                    LOGGER.fine("Node " + c.getNode().getDisplayName()
                            + " is excluded from Selenium Grid because its label '"
                            + label + "' matches exclusion pattern '" + pattern + "'");
                    return;
                }
            }
        }
    }
    final String masterName = PluginImpl.getMasterHostName();
    if (masterName == null) {
        listener.getLogger()
                .println("Unable to determine the host name of the master. Skipping Selenium execution. "
                        + "Please " + HyperlinkNote.encodeTo("/configure", "configure the Jenkins URL")
                        + " from the system configuration screen.");
        return;
    }

    // make sure that Selenium Hub is started before we start RCs.
    try {
        p.waitForHubLaunch();
    } catch (ExecutionException e) {
        throw new IOException("Failed to wait for the Hub launch to complete", e);
    }

    List<SeleniumGlobalConfiguration> confs = getPlugin().getGlobalConfigurationForComputer(c);
    if (confs == null || confs.size() == 0) {
        LOGGER.fine("There is no matching configurations for that computer. Skipping selenium execution.");
        return;
    }
    listener.getLogger()
            .println("Starting Selenium nodes on " + ("".equals(c.getName()) ? "(master)" : c.getName()));

    for (SeleniumGlobalConfiguration config : confs) {
        if ((conf != null && config.getName().equals(conf)) || conf == null) {
            try {
                config.start(c, listener);
            } catch (ExecutionException e) {
                LOGGER.log(Level.SEVERE, e.getMessage(), e);
            }
        }
    }
}
From source file:org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.java
@Override
public Result[] call(int timeout) throws IOException {
    // If the active replica callable was closed somewhere, invoke the RPC to
    // really close it. In the case of regular scanners, this applies. We make a couple
    // of RPCs to a RegionServer, and when that region is exhausted, we set
    // the closed flag. Then an RPC is required to actually close the scanner.
    if (currentScannerCallable != null && currentScannerCallable.closed) {
        // For closing we target that exact scanner (and do not do replica fallback like in
        // the case of normal reads)
        if (LOG.isTraceEnabled()) {
            LOG.trace("Closing scanner id=" + currentScannerCallable.scannerId);
        }
        Result[] r = currentScannerCallable.call(timeout);
        currentScannerCallable = null;
        return r;
    }
    // We need to do the following:
    // 1. When a scan goes out to a certain replica (default or not), we need to
    //    continue to hit that until there is a failure. So store the last successfully invoked
    //    replica.
    // 2. We should close the "losing" scanners (scanners other than the ones we hear back
    //    from first).
    RegionLocations rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(true,
            RegionReplicaUtil.DEFAULT_REPLICA_ID, cConnection, tableName, currentScannerCallable.getRow());

    // allocate a bounded completion pool of some multiple of number of replicas.
    // We want to accommodate some RPCs for redundant replica scans (that are still in progress)
    ResultBoundedCompletionService<Pair<Result[], ScannerCallable>> cs =
            new ResultBoundedCompletionService<Pair<Result[], ScannerCallable>>(
                    RpcRetryingCallerFactory.instantiate(ScannerCallableWithReplicas.this.conf), pool,
                    rl.size() * 5);

    AtomicBoolean done = new AtomicBoolean(false);
    replicaSwitched.set(false);
    // submit call for the primary replica.
    addCallsForCurrentReplica(cs, rl);
    try {
        // wait for the timeout to see whether the primary responds back
        Future<Pair<Result[], ScannerCallable>> f = cs.poll(timeBeforeReplicas,
                TimeUnit.MICROSECONDS); // Yes, microseconds
        if (f != null) {
            Pair<Result[], ScannerCallable> r = f.get();
            if (r != null && r.getSecond() != null) {
                updateCurrentlyServingReplica(r.getSecond(), r.getFirst(), done, pool);
            }
            return r == null ? null : r.getFirst(); // great, we got a response
        }
    } catch (ExecutionException e) {
        RpcRetryingCallerWithReadReplicas.throwEnrichedException(e, retries);
    } catch (CancellationException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    }
    // submit call for all of the secondaries at once
    // TODO: this may be overkill for large region replication
    addCallsForOtherReplicas(cs, rl, 0, rl.size() - 1);
    try {
        Future<Pair<Result[], ScannerCallable>> f = cs.take();
        Pair<Result[], ScannerCallable> r = f.get();
        if (r != null && r.getSecond() != null) {
            updateCurrentlyServingReplica(r.getSecond(), r.getFirst(), done, pool);
        }
        return r == null ? null : r.getFirst(); // great, we got an answer
    } catch (ExecutionException e) {
        RpcRetryingCallerWithReadReplicas.throwEnrichedException(e, retries);
    } catch (CancellationException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } finally {
        // We get here because we were interrupted or because one or more of the
        // calls succeeded or failed. In all cases, we stop all our tasks.
        cs.cancelAll();
    }
    return null; // unreachable
}
From source file:com.aliyun.odps.ship.download.DshipDownload.java
private void multiThreadDownload() throws TunnelException {
    ArrayList<Callable<Long>> callList = new ArrayList<Callable<Long>>();
    for (final FileDownloader downloader : workItems) {
        Callable<Long> call = new Callable<Long>() {
            @Override
            public Long call() throws Exception {
                downloader.download();
                return downloader.getWrittenBytes();
            }
        };
        callList.add(call);
    }
    ExecutorService executors = Executors.newFixedThreadPool(threads);
    try {
        List<Future<Long>> futures = executors.invokeAll(callList);
        ArrayList<String> failedThread = new ArrayList<String>();
        for (int i = 0; i < futures.size(); ++i) {
            try {
                writtenBytes += futures.get(i).get();
            } catch (ExecutionException e) {
                e.printStackTrace();
                failedThread.add(String.valueOf(i));
            }
        }
        if (!failedThread.isEmpty()) {
            throw new TunnelException("Slice ID:" + StringUtils.join(failedThread, ",") + " Failed.");
        }
    } catch (InterruptedException e) {
        throw new UserInterruptException(e.getMessage());
    }
}
From source file:com.streamsets.pipeline.stage.origin.jdbc.AbstractTableJdbcSource.java
private void getTables(Stage.Context context, List<ConfigIssue> issues, ConnectionManager connectionManager)
        throws StageException, SQLException {
    // clear the list
    allTableContexts.clear();
    allTableContexts = listTablesForConfig(getContext(), issues, connectionManager);

    LOG.info("Selected Tables: \n {}", NEW_LINE_JOINER.join(allTableContexts.keySet()));

    if (allTableContexts.isEmpty() && !commonSourceConfigBean.allowLateTable) {
        issues.add(context.createConfigIssue(
                com.streamsets.pipeline.stage.origin.jdbc.cdc.sqlserver.Groups.TABLE.name(),
                TableJdbcConfigBean.TABLE_CONFIG, JdbcErrors.JDBC_66));
    } else {
        issues = validatePartitioningConfigs(context, issues, allTableContexts,
                qualifiedTableNameToConfigIndex);

        if (!issues.isEmpty()) {
            return;
        }

        numberOfThreads = tableJdbcConfigBean.numberOfThreads;

        TableOrderProvider tableOrderProvider = new TableOrderProviderFactory(
                connectionManager.getConnection(), tableJdbcConfigBean.tableOrderStrategy).create();

        try {
            tableOrderProvider.initialize(allTableContexts);
            if (this.tableOrderProvider == null) {
                this.tableOrderProvider = new MultithreadedTableProvider(allTableContexts,
                        tableOrderProvider.getOrderedTables(), decideMaxTableSlotsForThreads(),
                        numberOfThreads, tableJdbcConfigBean.batchTableStrategy);
            } else {
                this.tableOrderProvider.setTableContextMap(allTableContexts,
                        tableOrderProvider.getOrderedTables());
            }
        } catch (ExecutionException e) {
            LOG.error("Error during Table Order Provider Init", e);
            throw new StageException(JdbcErrors.JDBC_67, e.getMessage(), e);
        }
        // Accessed by all runner threads
        offsets = new ConcurrentHashMap<>();
    }
}
From source file:org.apache.hadoop.hdfs.TestAsyncDFS.java
@Test(timeout = 60000)
public void testAsyncAPIWithException() throws Exception {
    String group1 = "group1";
    String group2 = "group2";
    String user1 = "user1";
    UserGroupInformation ugi1;

    // create fake mapping for the groups
    Map<String, String[]> u2gMap = new HashMap<String, String[]>(1);
    u2gMap.put(user1, new String[] { group1, group2 });
    DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2gMap);

    // Initiate all four users
    ugi1 = UserGroupInformation.createUserForTesting(user1, new String[] { group1, group2 });

    final Path parent = new Path("/test/async_api_exception/");
    final Path aclDir = new Path(parent, "aclDir");
    final Path src = new Path(parent, "src");
    final Path dst = new Path(parent, "dst");
    fs.mkdirs(aclDir, FsPermission.createImmutable((short) 0700));
    fs.mkdirs(src);

    AsyncDistributedFileSystem adfs1 = ugi1
            .doAs(new PrivilegedExceptionAction<AsyncDistributedFileSystem>() {
                @Override
                public AsyncDistributedFileSystem run() throws Exception {
                    return cluster.getFileSystem().getAsyncDistributedFileSystem();
                }
            });

    Future<Void> retFuture;
    // test rename
    try {
        retFuture = adfs1.rename(src, dst, Rename.OVERWRITE);
        retFuture.get();
    } catch (ExecutionException e) {
        checkPermissionDenied(e, src, user1);
        assertTrue("Permission denied messages must carry the path parent",
                e.getMessage().contains(src.getParent().toUri().getPath()));
    }

    // test setPermission
    FsPermission fsPerm = new FsPermission(permGenerator.next());
    try {
        retFuture = adfs1.setPermission(src, fsPerm);
        retFuture.get();
    } catch (ExecutionException e) {
        checkPermissionDenied(e, src, user1);
    }

    // test setOwner
    try {
        retFuture = adfs1.setOwner(src, "user1", "group2");
        retFuture.get();
    } catch (ExecutionException e) {
        checkPermissionDenied(e, src, user1);
    }

    // test setAcl
    try {
        retFuture = adfs1.setAcl(aclDir, Lists.newArrayList(aclEntry(ACCESS, USER, ALL)));
        retFuture.get();
        fail("setAcl should fail with permission denied");
    } catch (ExecutionException e) {
        checkPermissionDenied(e, aclDir, user1);
    }

    // test getAclStatus
    try {
        Future<AclStatus> aclRetFuture = adfs1.getAclStatus(aclDir);
        aclRetFuture.get();
        fail("getAclStatus should fail with permission denied");
    } catch (ExecutionException e) {
        checkPermissionDenied(e, aclDir, user1);
    }
}
From source file:io.flutter.plugins.googlesignin.GoogleSignInPlugin.java
/**
 * Gets an OAuth access token with the scopes that were specified during {@link
 * #init(response,List<String>) initialization} for the user with the specified email
 * address.
 */
private void getToken(Response response, final String email) {
    if (email == null) {
        response.error(ERROR_REASON_EXCEPTION, "Email is null", null);
        return;
    }
    if (checkAndSetPendingOperation(METHOD_GET_TOKEN, response)) {
        return;
    }

    Callable<String> getTokenTask = new Callable<String>() {
        @Override
        public String call() throws Exception {
            Account account = new Account(email, "com.google");
            String scopesStr = "oauth2:" + Joiner.on(' ').join(requestedScopes);
            return GoogleAuthUtil.getToken(activity.getApplication(), account, scopesStr);
        }
    };

    backgroundTaskRunner.runInBackground(getTokenTask, new BackgroundTaskRunner.Callback<String>() {
        @Override
        public void run(Future<String> tokenFuture) {
            try {
                finishWithSuccess(tokenFuture.get());
            } catch (ExecutionException e) {
                Log.e(TAG, "Exception getting access token", e);
                finishWithError(ERROR_REASON_EXCEPTION, e.getCause().getMessage());
            } catch (InterruptedException e) {
                finishWithError(ERROR_REASON_EXCEPTION, e.getMessage());
            }
        }
    });
}
From source file:org.geomajas.plugin.rasterizing.layer.RasterDirectLayer.java
@Override
public void draw(Graphics2D graphics, MapContent map, MapViewport viewport) {
    try {
        if (tiles.size() > 0) {
            Collection<Callable<ImageResult>> callables = new ArrayList<Callable<ImageResult>>(tiles.size());
            // Build the image downloading threads
            for (RasterTile tile : tiles) {
                RasterImageDownloadCallable downloadThread = new RasterImageDownloadCallable(
                        DOWNLOAD_MAX_ATTEMPTS, tile);
                callables.add(downloadThread);
            }
            // Loop until all images are downloaded or timeout is reached
            long totalTimeout = DOWNLOAD_TIMEOUT + DOWNLOAD_TIMEOUT_ONE_TILE * tiles.size();
            log.debug("=== total timeout (millis): {}", totalTimeout);
            ExecutorService service = Executors.newFixedThreadPool(DOWNLOAD_MAX_THREADS);
            List<Future<ImageResult>> futures = service.invokeAll(callables, totalTimeout,
                    TimeUnit.MILLISECONDS);
            // determine the pixel bounds of the mosaic
            Bbox pixelBounds = getPixelBounds(tiles);
            // create the images for the mosaic
            List<RenderedImage> images = new ArrayList<RenderedImage>();
            for (Future<ImageResult> future : futures) {
                ImageResult result = null;
                if (future.isDone()) {
                    try {
                        result = future.get();
                        // create a rendered image
                        if (result.getImage() != null && result.getImage().length > 0) {
                            RenderedImage image = JAI.create("stream",
                                    new ByteArraySeekableStream(result.getImage()));
                            // convert to common direct color model (some images have their own
                            // indexed color model)
                            RenderedImage colored = toDirectColorModel(image);
                            // translate to the correct position in the tile grid
                            double xOffset = result.getRasterImage().getCode().getX() * tileWidth
                                    - pixelBounds.getX();
                            double yOffset;
                            // TODO: in some cases, the y-index is up (e.g. WMS), should be down for
                            // all layers !!!!
                            if (isYIndexUp(tiles)) {
                                yOffset = result.getRasterImage().getCode().getY() * tileHeight
                                        - pixelBounds.getY();
                            } else {
                                yOffset = (pixelBounds.getMaxY()
                                        - (result.getRasterImage().getCode().getY() + 1) * tileHeight);
                            }
                            log.debug("adding to(" + xOffset + "," + yOffset + "), url = "
                                    + result.getRasterImage().getUrl());
                            RenderedImage translated = TranslateDescriptor.create(colored, (float) xOffset,
                                    (float) yOffset, new InterpolationNearest(), null);
                            images.add(translated);
                        }
                    } catch (ExecutionException e) {
                        addLoadError(graphics, (ImageException) (e.getCause()), viewport);
                        log.warn(MISSING_TILE_IN_MOSAIC + e.getMessage());
                    } catch (Exception e) {
                        log.warn("Missing tile " + result.getRasterImage().getUrl());
                        log.warn(MISSING_TILE_IN_MOSAIC + e.getMessage());
                    }
                }
            }
            if (images.size() > 0) {
                ImageLayout imageLayout = new ImageLayout(0, 0, (int) pixelBounds.getWidth(),
                        (int) pixelBounds.getHeight());
                imageLayout.setTileWidth(tileWidth);
                imageLayout.setTileHeight(tileHeight);
                // create the mosaic image
                ParameterBlock pbMosaic = new ParameterBlock();
                pbMosaic.add(MosaicDescriptor.MOSAIC_TYPE_OVERLAY);
                for (RenderedImage renderedImage : images) {
                    pbMosaic.addSource(renderedImage);
                }
                RenderedOp mosaic = JAI.create("mosaic", pbMosaic,
                        new RenderingHints(JAI.KEY_IMAGE_LAYOUT, imageLayout));
                try {
                    ByteArrayOutputStream baos = new ByteArrayOutputStream();
                    log.debug("rendering to buffer...");
                    ImageIO.write(mosaic, "png", baos);
                    log.debug("rendering done, size = " + baos.toByteArray().length);
                    RasterTile mosaicTile = new RasterTile();
                    mosaicTile.setBounds(getWorldBounds(tiles));
                    log.info("application bounds = " + mosaicTile.getBounds());
                    ImageResult mosaicResult = new ImageResult(mosaicTile);
                    mosaicResult.setImage(baos.toByteArray());
                    addImage(graphics, mosaicResult, viewport);
                } catch (IOException e) {
                    log.warn("could not write mosaic image " + e.getMessage());
                }
            }
        }
    } catch (InterruptedException e) {
        log.warn("rendering {} to {} failed : ", getTitle(), viewport.getBounds());
    }
}
From source file:org.phenotips.panels.rest.internal.DefaultGenePanelsResourceImpl.java
@Override
public Response getGeneCountsFromPhenotypes() {
    Request request = this.container.getRequest();
    List<String> presentTerms = new ArrayList<>();
    for (Object t : request.getProperties("present-term")) {
        if (t != null) {
            presentTerms.add((String) t);
        }
    }
    presentTerms = Collections.unmodifiableList(presentTerms);
    List<String> absentTerms = new ArrayList<>();
    for (Object t : request.getProperties("absent-term")) {
        if (t != null) {
            absentTerms.add((String) t);
        }
    }
    absentTerms = Collections.unmodifiableList(absentTerms);

    if (CollectionUtils.isEmpty(presentTerms) && CollectionUtils.isEmpty(absentTerms)) {
        this.logger.error("No content provided.");
        return Response.status(Response.Status.NO_CONTENT).build();
    }

    final int startPage = NumberUtils.toInt((String) request.getProperty(START_PAGE_LABEL), 1);
    final int numResults = NumberUtils.toInt((String) request.getProperty(RESULTS_LABEL), -1);
    final int reqNo = NumberUtils.toInt((String) request.getProperty(REQ_NO), 0);

    try {
        // Try to generate the JSON for the requested subset of data.
        final JSONObject panels = getPageData(this.genePanelLoader.get(presentTerms), startPage, numResults);
        panels.put(REQ_NO, reqNo);
        return Response.ok(panels, MediaType.APPLICATION_JSON_TYPE).build();
    } catch (final ExecutionException e) {
        this.logger.error("No content associated with [present-term: {}, absent-term: {}].", presentTerms,
                absentTerms);
        return Response.status(Response.Status.NO_CONTENT).build();
    } catch (final IndexOutOfBoundsException e) {
        this.logger.error("The requested [{}: {}] is out of bounds.", START_PAGE_LABEL, startPage);
        return Response.status(Response.Status.BAD_REQUEST).build();
    } catch (final Exception e) {
        this.logger.error("Unexpected exception while generating gene panel JSON: {}", e.getMessage());
        return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
    }
}
From source file:burstcoin.jminer.core.network.task.NetworkSubmitPoolNonceTask.java
@Override
public void run() {
    try {
        long gb = totalCapacity / 1000 / 1000 / 1000;
        ContentResponse response = httpClient.POST(poolServer + "/burst")
                .param("requestType", "submitNonce")
                .param("accountId", numericAccountId)
                .param("nonce", Convert.toUnsignedLong(nonce))
                .header("X-Miner", HEADER_MINER_NAME)
                .header("X-Capacity", String.valueOf(gb))
                .timeout(connectionTimeout, TimeUnit.MILLISECONDS)
                .send();

        if (response.getContentAsString().contains("errorCode")) {
            LOG.warn("Error: Submit nonce to pool not successful: " + response.getContentAsString());
        } else {
            SubmitResultResponse result = objectMapper.readValue(response.getContentAsString(),
                    SubmitResultResponse.class);
            if (result.getResult().equals("success")) {
                if (calculatedDeadline == result.getDeadline()) {
                    publisher.publishEvent(new NetworkResultConfirmedEvent(blockNumber, result.getDeadline(),
                            nonce, chunkPartStartNonce));
                } else {
                    // todo re-commit
                    publisher.publishEvent(new NetworkResultErrorEvent(blockNumber, nonce, calculatedDeadline,
                            result.getDeadline(), chunkPartStartNonce));
                }
            } else {
                LOG.warn("Error: Submit nonce to pool not successful: " + response.getContentAsString());
            }
        }
    } catch (TimeoutException timeoutException) {
        LOG.warn("Nonce was committed to pool, but not confirmed ... caused by connectionTimeout,"
                + " currently '" + (connectionTimeout / 1000) + " sec.' try increasing it!");
    } catch (ExecutionException e) {
        // inform user about reward assignment issue
        if (e.getCause() instanceof HttpResponseException) {
            HttpResponseException responseException = (HttpResponseException) e.getCause();
            if (responseException.getResponse() instanceof HttpContentResponse) {
                HttpContentResponse httpContentResponse =
                        (HttpContentResponse) responseException.getResponse();
                LOG.warn("Error: Failed to submit nonce to pool: "
                        + httpContentResponse.getContentAsString());
            }
        } else {
            LOG.warn("Error: Failed to submit nonce to pool: " + e.getMessage());
        }
    } catch (Exception e) {
        LOG.warn("Error: Failed to submit nonce to pool: " + e.getMessage());
    }
}
From source file:org.opendaylight.netvirt.dhcpservice.DhcpExternalTunnelManager.java
public void handleTunnelStateDown(IpAddress tunnelIp, BigInteger interfaceDpn,
        List<ListenableFuture<Void>> futures) {
    LOG.trace("In handleTunnelStateDown tunnelIp {}, interfaceDpn {}", tunnelIp, interfaceDpn);
    if (interfaceDpn == null) {
        return;
    }
    try {
        synchronized (getTunnelIpDpnKey(tunnelIp, interfaceDpn)) {
            Set<Pair<IpAddress, String>> tunnelElanPairSet = designatedDpnsToTunnelIpElanNameCache
                    .get(interfaceDpn);
            if (tunnelElanPairSet == null || tunnelElanPairSet.isEmpty()) {
                return;
            }
            WriteTransaction tx = broker.newWriteOnlyTransaction();
            for (Pair<IpAddress, String> tunnelElanPair : tunnelElanPairSet) {
                IpAddress tunnelIpInDpn = tunnelElanPair.getLeft();
                if (tunnelIpInDpn.equals(tunnelIp)) {
                    if (!checkL2GatewayConnection(tunnelElanPair)) {
                        LOG.trace("Couldn't find device for given tunnelIpElanPair {} in L2GwConnCache",
                                tunnelElanPair);
                        return;
                    }
                    List<BigInteger> dpns = DhcpServiceUtils.getListOfDpns(broker);
                    dpns.remove(interfaceDpn);
                    changeExistingFlowToDrop(tunnelElanPair, interfaceDpn, tx);
                    updateCacheAndInstallNewFlows(interfaceDpn, dpns, tunnelElanPair, tx);
                }
            }
            futures.add(tx.submit());
        }
    } catch (ExecutionException e) {
        LOG.error("Error in handleTunnelStateDown {}", e.getMessage());
        LOG.trace("Exception details {}", e);
    }
}