List of usage examples for java.lang InterruptedException getCause
public synchronized Throwable getCause()
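getCause() is inherited from Throwable; in the examples below it is almost always invoked on the ExecutionException thrown by Future.get(), where it returns the exception that the task itself threw. A minimal, self-contained sketch of that pattern (class and message names are illustrative, not taken from any of the sources below):

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Callable<String> task = () -> {
            throw new IOException("simulated task failure");
        };
        Future<String> future = executor.submit(task);
        try {
            future.get();
        } catch (ExecutionException e) {
            // getCause() recovers the exception thrown inside the task
            System.out.println(e.getCause()); // java.io.IOException: simulated task failure
        } finally {
            executor.shutdown();
        }
    }
}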
From source file:com.echopf.ECHOFile.java
/**
 * {@.en Gets the remote file data.}
 * @throws ECHOException
 */
public byte[] getRemoteBytes() throws ECHOException {
    // Run the download on a single-threaded background executor
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Callable<byte[]> communicator = new Callable<byte[]>() {
        @Override
        public byte[] call() throws Exception {
            InputStream is = getRemoteInputStream();
            try {
                int nRead;
                byte[] data = new byte[16384];
                ByteArrayOutputStream buffer = new ByteArrayOutputStream();
                while ((nRead = is.read(data, 0, data.length)) != -1) {
                    buffer.write(data, 0, nRead);
                }
                buffer.flush();
                return buffer.toByteArray();
            } finally {
                is.close();
            }
        }
    };
    Future<byte[]> future = executor.submit(communicator);
    try {
        return future.get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag
    } catch (ExecutionException e) {
        // Unwrap the exception thrown inside call() and rethrow as a domain exception
        Throwable e2 = e.getCause();
        throw new ECHOException(e2);
    } finally {
        executor.shutdown();
    }
    return null;
}
From source file:com.github.horrorho.inflatabledonkey.cloud.Donkey.java
void processConcurrent(HttpClient httpClient, ForkJoinPool fjp, AssetPool pool, FileAssembler consumer)
        throws IOException {
    logger.trace("<< processConcurrent()");
    try {
        Collection<StorageHostChunkList> containers = pool.authorize(httpClient);
        fjp.submit(() -> containers.parallelStream()
                .forEach(u -> processContainer(httpClient, u, pool, consumer)))
                .get();
    } catch (InterruptedException ex) {
        throw new UncheckedInterruptedException(ex);
    } catch (ExecutionException ex) {
        // Rethrow the task's real exception with its original type where possible
        Throwable cause = ex.getCause();
        if (cause instanceof RuntimeException) {
            throw (RuntimeException) cause;
        }
        if (cause instanceof IOException) {
            throw (IOException) cause;
        }
        throw new RuntimeException(cause);
    }
    logger.trace(">> processConcurrent()");
}
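A recurring idiom in this and several later examples: inspect getCause() and rethrow it with its original static type, wrapping anything unexpected in a RuntimeException. A standalone sketch of that dispatch, assuming the caller only needs to preserve IOException and unchecked types (the helper name is hypothetical):

import java.io.IOException;
import java.util.concurrent.ExecutionException;

final class Causes {

    private Causes() {
    }

    // Rethrows the task's underlying exception, preserving its type where the
    // caller's throws clause allows it; everything else is wrapped unchecked.
    static void rethrow(ExecutionException ex) throws IOException {
        Throwable cause = ex.getCause();
        if (cause instanceof RuntimeException) {
            throw (RuntimeException) cause;
        }
        if (cause instanceof IOException) {
            throw (IOException) cause;
        }
        throw new RuntimeException(cause);
    }
}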
From source file:org.compass.core.executor.DefaultExecutorManager.java
public <T> List<Future<T>> invokeAllWithLimitBailOnException(Collection<Callable<T>> tasks,
        int concurrencyThreshold) {
    if (disabled) {
        throw new UnsupportedOperationException("Executor Manager is disabled");
    }
    List<Future<T>> futures = invokeAllWithLimit(tasks, concurrencyThreshold);
    for (Future<T> future : futures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw new ExecutorException("Failed to execute, interrupted", e);
        } catch (ExecutionException e) {
            if (e.getCause() instanceof CompassException) {
                throw (CompassException) e.getCause();
            }
            throw new ExecutorException("Failed to execute", e.getCause());
        }
    }
    return futures;
}
From source file:com.anrisoftware.prefdialog.miscswing.actions.Actions.java
Actions() {
    this.actions = new HashMap<String, List<ActionEntry>>();
    this.awtActions = new HashMap<String, List<Runnable>>();
    this.actionListener = new PropertyChangeListener() {

        @Override
        public void propertyChange(PropertyChangeEvent evt) {
            Future<?> future = (Future<?>) evt.getSource();
            try {
                future.get();
            } catch (InterruptedException e) {
                log.taskInterrupted(future, e);
            } catch (ExecutionException e) {
                log.taskError(future, e.getCause());
            }
        }
    };
}
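The listener above works because the event source is a Future. Swing's SwingWorker is one such source: it is itself a Future and fires a "state" property change when it completes, at which point get() surfaces any background failure as an ExecutionException. A minimal sketch of that wiring (a generic illustration, not code from the Actions class above):

import java.beans.PropertyChangeEvent;
import java.util.concurrent.ExecutionException;
import javax.swing.SwingWorker;

public class WorkerCauseDemo {
    public static void main(String[] args) {
        SwingWorker<String, Void> worker = new SwingWorker<String, Void>() {
            @Override
            protected String doInBackground() {
                throw new IllegalStateException("background failure");
            }
        };
        worker.addPropertyChangeListener((PropertyChangeEvent evt) -> {
            if ("state".equals(evt.getPropertyName())
                    && evt.getNewValue() == SwingWorker.StateValue.DONE) {
                try {
                    worker.get();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } catch (ExecutionException e) {
                    // The listener sees the background exception via getCause()
                    System.out.println("task failed: " + e.getCause());
                }
            }
        });
        worker.execute();
    }
}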
From source file:com.microsoft.azure.servicebus.samples.managingentity.ManagingEntity.java
private void createQueue(String queueName) {
    // Name of the queue is a required parameter.
    // All other queue properties have defaults, and hence are optional.
    QueueDescription queueDescription = new QueueDescription(queueName);

    // The duration of a peek lock; that is, the amount of time that a message is locked from other receivers.
    queueDescription.setLockDuration(Duration.ofSeconds(45));

    // Size of the queue. For a non-partitioned entity, this is the size of the whole queue.
    // For a partitioned entity, this is the size of each partition.
    queueDescription.setMaxSizeInMB(2048);

    // Indicates whether the queue guards against duplicate messages.
    // Find out more in the DuplicateDetection sample.
    queueDescription.setRequiresDuplicateDetection(false);

    // Since RequiresDuplicateDetection is false, the following need not be specified and will be ignored.
    // queueDescription.setDuplicationDetectionHistoryTimeWindow(Duration.ofMinutes(2));

    // Indicates whether the queue supports the concept of sessions.
    queueDescription.setRequiresSession(false);

    // The default time-to-live value for messages.
    // Find out more in the "TimeToLive" sample.
    queueDescription.setDefaultMessageTimeToLive(Duration.ofDays(7));

    // Duration of the idle interval after which the queue is automatically deleted.
    queueDescription.setAutoDeleteOnIdle(ManagementClientConstants.MAX_DURATION);

    // Decides whether a message expired due to TTL should be dead-lettered.
    // Find out more in the "TimeToLive" sample.
    queueDescription.setEnableDeadLetteringOnMessageExpiration(false);

    // The maximum delivery count of a message before it is dead-lettered.
    // Find out more in the "DeadletterQueue" sample.
    queueDescription.setMaxDeliveryCount(8);

    // Creating only one partition.
    // Find out more in the PartitionedQueues sample.
    queueDescription.setEnablePartitioning(false);

    try {
        this.managementClient.createQueueAsync(queueDescription).get();
    } catch (InterruptedException e) {
        System.out.println("Encountered exception while creating Queue - \n" + e.toString());
    } catch (ExecutionException e) {
        // getCause() distinguishes a ServiceBusException from any other failure inside the async call
        if (e.getCause() instanceof ServiceBusException) {
            System.out.println("Encountered ServiceBusException while creating Queue - \n" + e.toString());
        } else {
            System.out.println("Encountered exception while creating Queue - \n" + e.toString());
        }
    }
}
From source file:com.opengamma.integration.viewer.status.impl.ViewStatusCalculationWorker.java
public ViewStatusResultAggregator run() {
    ViewStatusResultAggregator aggregator = new ViewStatusResultAggregatorImpl();
    CompletionService<PerViewStatusResult> completionService =
            new ExecutorCompletionService<PerViewStatusResult>(_executor);

    // Submit tasks to the executor, partitioned by security type
    for (String securityType : _valueRequirementBySecType.keySet()) {
        Collection<String> valueRequirements = _valueRequirementBySecType.get(securityType);
        completionService.submit(new ViewStatusCalculationTask(_toolContext, _portfolioId, _user,
                securityType, valueRequirements, _marketDataSpecification));
    }
    try {
        // Process all completed tasks
        for (int i = 0; i < _valueRequirementBySecType.size(); i++) {
            Future<PerViewStatusResult> futureTask = completionService.take();
            PerViewStatusResult perViewStatusResult = futureTask.get();
            for (ViewStatusKey viewStatusKey : perViewStatusResult.keySet()) {
                aggregator.putStatus(viewStatusKey, perViewStatusResult.get(viewStatusKey));
            }
        }
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
    } catch (ExecutionException ex) {
        throw new OpenGammaRuntimeException("Error running View status report", ex.getCause());
    }
    return aggregator;
}
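For context, ExecutorCompletionService hands back futures in completion order rather than submission order; get() on each completed future still reports task failure as an ExecutionException. A self-contained sketch of the take()/get() loop used above (task bodies are illustrative):

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CompletionServiceDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletionService<Integer> cs = new ExecutorCompletionService<>(pool);
        for (int i = 0; i < 4; i++) {
            final int n = i;
            cs.submit(() -> {
                if (n == 2) {
                    throw new IllegalStateException("task " + n + " failed");
                }
                return n * n;
            });
        }
        try {
            for (int i = 0; i < 4; i++) {
                // take() blocks until some task finishes; get() then reports its result or failure
                Future<Integer> done = cs.take();
                try {
                    System.out.println("result: " + done.get());
                } catch (ExecutionException e) {
                    System.out.println("failed: " + e.getCause());
                }
            }
        } finally {
            pool.shutdown();
        }
    }
}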
From source file:org.apache.hadoop.hdfs.BlockStorageLocationUtil.java
/**
 * Queries datanodes for the blocks specified in <code>datanodeBlocks</code>,
 * making one RPC to each datanode. These RPCs are made in parallel using a
 * threadpool.
 *
 * @param datanodeBlocks
 *          Map of datanodes to the blocks present on the DN
 * @return metadatas Map of datanodes to block metadata of the DN
 * @throws InvalidBlockTokenException
 *           if client does not have read access on a requested block
 */
static Map<DatanodeInfo, HdfsBlocksMetadata> queryDatanodesForHdfsBlocksMetadata(Configuration conf,
        Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks, int poolsize, int timeoutMs,
        boolean connectToDnViaHostname, Tracer tracer, SpanId parentSpanId) throws InvalidBlockTokenException {

    List<VolumeBlockLocationCallable> callables = createVolumeBlockLocationCallables(conf, datanodeBlocks,
            timeoutMs, connectToDnViaHostname, tracer, parentSpanId);

    // Use a thread pool to execute the Callables in parallel
    List<Future<HdfsBlocksMetadata>> futures = new ArrayList<>();
    ExecutorService executor = new ScheduledThreadPoolExecutor(poolsize);
    try {
        futures = executor.invokeAll(callables, timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Swallow the exception here, because we can return partial results
    }
    executor.shutdown();

    Map<DatanodeInfo, HdfsBlocksMetadata> metadatas = Maps.newHashMapWithExpectedSize(datanodeBlocks.size());
    // Fill in metadatas with results from DN RPCs, where possible
    for (int i = 0; i < futures.size(); i++) {
        VolumeBlockLocationCallable callable = callables.get(i);
        DatanodeInfo datanode = callable.getDatanodeInfo();
        Future<HdfsBlocksMetadata> future = futures.get(i);
        try {
            HdfsBlocksMetadata metadata = future.get();
            metadatas.put(callable.getDatanodeInfo(), metadata);
        } catch (CancellationException e) {
            LOG.info("Cancelled while waiting for datanode " + datanode.getIpcAddr(false) + ": "
                    + e.toString());
        } catch (ExecutionException e) {
            Throwable t = e.getCause();
            if (t instanceof InvalidBlockTokenException) {
                LOG.warn("Invalid access token when trying to retrieve "
                        + "information from datanode " + datanode.getIpcAddr(false));
                throw (InvalidBlockTokenException) t;
            } else if (t instanceof UnsupportedOperationException) {
                LOG.info("Datanode " + datanode.getIpcAddr(false) + " does not support"
                        + " required #getHdfsBlocksMetadata() API");
                throw (UnsupportedOperationException) t;
            } else {
                LOG.info("Failed to query block locations on datanode " + datanode.getIpcAddr(false)
                        + ": " + t);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Could not fetch information from datanode", t);
            }
        } catch (InterruptedException e) {
            // Shouldn't happen, because invokeAll waits for all Futures to be ready
            LOG.info("Interrupted while fetching HdfsBlocksMetadata");
        }
    }
    return metadatas;
}
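Note the three distinct catch blocks above: invokeAll with a timeout cancels tasks that have not completed in time, and get() on a cancelled future throws CancellationException rather than ExecutionException. A compact sketch of that behavior (timings and task bodies are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class InvokeAllTimeoutDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        List<Callable<String>> tasks = Arrays.asList(
                () -> "fast",
                () -> {
                    Thread.sleep(5_000);
                    return "slow";
                });
        // Tasks still running when the timeout expires are cancelled
        List<Future<String>> futures = pool.invokeAll(tasks, 100, TimeUnit.MILLISECONDS);
        for (Future<String> f : futures) {
            try {
                System.out.println(f.get());
            } catch (CancellationException e) {
                System.out.println("timed out and was cancelled");
            } catch (ExecutionException e) {
                System.out.println("failed: " + e.getCause());
            }
        }
        pool.shutdown();
    }
}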
From source file:com.basho.riak.client.raw.pbc.PBMapReduceResult.java
public String getResultRaw() {
    try {
        return rawResultTask.get();
    } catch (InterruptedException e) {
        // propagate it up
        Thread.currentThread().interrupt();
        // TODO, or return an empty result?
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e.getCause());
    }
}
From source file:org.openspaces.itest.remoting.simple.plain.SimpleRemotingTests.java
@Test
public void testAsyncAsyncExecutionWithException() {
    try {
        simpleService.asyncTestException().get();
        fail();
    } catch (InterruptedException e) {
        fail();
    } catch (ExecutionException e) {
        if (!(e.getCause() instanceof SimpleService.MyException)) {
            fail();
        }
    }
}
From source file:org.openspaces.itest.remoting.simple.plain.SimpleRemotingTests.java
@Test
public void testExecutorAsyncExecutionWithException() {
    try {
        simpleServiceExecutor.asyncTestException().get();
        fail();
    } catch (InterruptedException e) {
        fail();
    } catch (ExecutionException e) {
        if (!(e.getCause() instanceof SimpleService.MyException)) {
            fail();
        }
    }
}
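With JUnit 4.13+ (or JUnit 5), the try/fail scaffolding in these two tests can be collapsed into an assertThrows call followed by an assertion on getCause(). A hedged sketch, using CompletableFuture.failedFuture (Java 9+) to stand in for the remote service:

import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import org.junit.Test;

public class AsyncCauseTest {

    static class MyException extends Exception {
    }

    @Test
    public void asyncFailureExposesCause() {
        CompletableFuture<Void> future = CompletableFuture.failedFuture(new MyException());
        // get() wraps the failure; getCause() exposes the original exception
        ExecutionException e = assertThrows(ExecutionException.class, future::get);
        assertTrue(e.getCause() instanceof MyException);
    }
}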