Example usage for java.lang InterruptedException getCause

Introduction

On this page you can find example usage for java.lang.InterruptedException.getCause(). The method is inherited from java.lang.Throwable, which is why most of the examples below actually invoke it on other throwables, typically an ExecutionException caught in the same try block as the InterruptedException.

Prototype

public synchronized Throwable getCause() 

Document

Returns the cause of this throwable or null if the cause is nonexistent or unknown.
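The pattern is easiest to see in isolation before the project examples. Below is a minimal, self-contained sketch (not taken from any of the projects on this page; the class and method names are invented for illustration): a blocking wait on a Future restores the interrupt flag on InterruptedException and unwraps ExecutionException.getCause() to recover the failure that occurred inside the task.

// Minimal illustrative sketch; GetCauseSketch and fetchBlocking are invented names.
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class GetCauseSketch {

    static String fetchBlocking(Future<String> future) {
        try {
            return future.get(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before translating the exception.
            Thread.currentThread().interrupt();
            throw new IllegalStateException("Interrupted while waiting", e);
        } catch (TimeoutException e) {
            throw new IllegalStateException("No response after 10 seconds", e);
        } catch (ExecutionException e) {
            // getCause() (inherited from Throwable) returns the exception thrown
            // inside the task, or null if the cause is nonexistent or unknown.
            Throwable cause = e.getCause();
            throw new IllegalStateException(cause != null ? cause.getMessage() : "Unexpected failure", cause);
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<String> future = pool.submit(() -> "done");
        System.out.println(fetchBlocking(future)); // prints "done"
        pool.shutdown();
    }
}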

Usage

From source file:ch.algotrader.service.tt.TTFixReferenceDataServiceImpl.java
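A blocking wait on a Promise is translated into service-level exceptions: the interrupt flag is restored before wrapping the InterruptedException, and the cause of an ExecutionException is unwrapped so its message can be used in the ServiceException.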

private List<TTSecurityDefVO> getSecurityDefsBlocking(final Promise<List<TTSecurityDefVO>> promise) {
    try {
        return promise.get(10, TimeUnit.SECONDS);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new ServiceException(ex);
    } catch (TimeoutException e) {
        throw new NoServiceResponseException("No response from TT after 10 seconds");
    } catch (ExecutionException ex) {
        Throwable cause = ex.getCause();
        throw new ServiceException(cause != null ? cause.getMessage() : "Unexpected exception", cause);
    }
}

From source file:org.springframework.kafka.core.KafkaAdmin.java
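Spring Kafka waits for the AdminClient's topic-creation result: interruption restores the flag and is logged, a timeout becomes a KafkaException, and an ExecutionException is unwrapped via getCause() before logging and rethrowing.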

private void addTopics(AdminClient adminClient, List<NewTopic> topicsToAdd) {
    CreateTopicsResult topicResults = adminClient.createTopics(topicsToAdd);
    try {
        topicResults.all().get(this.operationTimeout, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        logger.error("Interrupted while waiting for topic creation results", e);
    } catch (TimeoutException e) {
        throw new KafkaException("Timed out waiting for create topics results", e);
    } catch (ExecutionException e) {
        logger.error("Failed to create topics", e.getCause());
        throw new KafkaException("Failed to create topics", e.getCause()); // NOSONAR
    }
}

From source file:ch.cyberduck.core.openstack.SwiftLargeObjectUploadFeature.java
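Cyberduck uploads the segments of a large object concurrently; when collecting the Future results, the cause of an ExecutionException is rethrown as-is if it is already a BackgroundException, and otherwise run through an exception mapping service.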

@Override
public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle,
        final StreamListener listener, final TransferStatus status, final ConnectionCallback callback)
        throws BackgroundException {
    final DefaultThreadPool pool = new DefaultThreadPool("multipart", concurrency);
    final List<Path> existingSegments = new ArrayList<Path>();
    if (status.isAppend() || status.isRetry()) {
        // Get a lexicographically ordered list of the existing file segments
        existingSegments
                .addAll(listService
                        .list(segmentService.getSegmentsDirectory(file,
                                status.getOffset() + status.getLength()), new DisabledListProgressListener())
                        .toList());
    }
    // Get the results of the uploads in the order they were submitted
    // this is important for building the manifest, and is not a problem in terms of performance
    // because we should only continue when all segments have uploaded successfully
    final List<StorageObject> completed = new ArrayList<StorageObject>();
    // Submit file segments for concurrent upload
    final List<Future<StorageObject>> segments = new ArrayList<Future<StorageObject>>();
    long remaining = status.getLength();
    long offset = 0;
    for (int segmentNumber = 1; remaining > 0; segmentNumber++) {
        final Long length = Math.min(segmentSize, remaining);
        // Segment name with left padded segment number
        final Path segment = segmentService.getSegment(file, status.getOffset() + status.getLength(),
                segmentNumber);
        if (existingSegments.contains(segment)) {
            final Path existingSegment = existingSegments.get(existingSegments.indexOf(segment));
            if (log.isDebugEnabled()) {
                log.debug(String.format("Skip segment %s", existingSegment));
            }
            final StorageObject stored = new StorageObject(containerService.getKey(segment));
            if (HashAlgorithm.md5.equals(existingSegment.attributes().getChecksum().algorithm)) {
                stored.setMd5sum(existingSegment.attributes().getChecksum().hash);
            }
            stored.setSize(existingSegment.attributes().getSize());
            offset += existingSegment.attributes().getSize();
            completed.add(stored);
        } else {
            // Submit to queue
            segments.add(
                    this.submit(pool, segment, local, throttle, listener, status, offset, length, callback));
            if (log.isDebugEnabled()) {
                log.debug(String.format("Segment %s submitted with size %d and offset %d", segment, length,
                        offset));
            }
            remaining -= length;
            offset += length;
        }
    }
    try {
        for (Future<StorageObject> futureSegment : segments) {
            completed.add(futureSegment.get());
        }
    } catch (InterruptedException e) {
        log.error("Part upload failed with interrupt failure");
        status.setCanceled();
        throw new ConnectionCanceledException(e);
    } catch (ExecutionException e) {
        log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
        if (e.getCause() instanceof BackgroundException) {
            throw (BackgroundException) e.getCause();
        }
        throw new DefaultExceptionMappingService().map(e.getCause());
    } finally {
        pool.shutdown(false);
    }
    // Mark parent status as complete
    status.setComplete();
    if (log.isInfoEnabled()) {
        log.info(String.format("Finished large file upload %s with %d parts", file, completed.size()));
    }
    // Create and upload the large object manifest. It is best to upload all the segments first and
    // then create or update the manifest.
    try {
        // Static Large Object.
        final String manifest = segmentService.manifest(containerService.getContainer(file).getName(),
                completed);
        if (log.isDebugEnabled()) {
            log.debug(String.format("Creating SLO manifest %s for %s", manifest, file));
        }
        final StorageObject stored = new StorageObject(containerService.getKey(file));
        final String checksum = session.getClient().createSLOManifestObject(
                regionService.lookup(containerService.getContainer(file)),
                containerService.getContainer(file).getName(), status.getMime(), containerService.getKey(file),
                manifest, Collections.emptyMap());
        // The value of the Content-Length header is the total size of all segment objects, and the value of the ETag header is calculated by taking
        // the ETag value of each segment, concatenating them together, and then returning the MD5 checksum of the result.
        stored.setMd5sum(checksum);
        return stored;
    } catch (GenericException e) {
        throw new SwiftExceptionMappingService().map("Upload {0} failed", e);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
}

From source file:org.springframework.kafka.core.KafkaAdmin.java
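The companion to addTopics above. Here the unwrapped cause is also type-checked: an UnsupportedVersionException is tolerated, while any other cause is rethrown as a KafkaException.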

private void modifyTopics(AdminClient adminClient, Map<String, NewPartitions> topicsToModify) {
    CreatePartitionsResult partitionsResult = adminClient.createPartitions(topicsToModify);
    try {
        partitionsResult.all().get(this.operationTimeout, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        logger.error("Interrupted while waiting for partition creation results", e);
    } catch (TimeoutException e) {
        throw new KafkaException("Timed out waiting for create partitions results", e);
    } catch (ExecutionException e) {
        logger.error("Failed to create partitions", e.getCause());
        if (!(e.getCause() instanceof UnsupportedVersionException)) {
            throw new KafkaException("Failed to create partitions", e.getCause()); // NOSONAR
        }
    }
}

From source file:org.opendaylight.controller.sal.restconf.impl.RestconfImpl.java
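OpenDaylight walks the whole cause chain in a loop to reach the root cause of an ExecutionException, then maps it to a RestconfDocumentedException whose error tag depends on the cause's type.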

private static DOMRpcResult checkRpcResponse(final CheckedFuture<DOMRpcResult, DOMRpcException> response) {
    if (response == null) {
        return null;
    }
    try {
        final DOMRpcResult retValue = response.get();
        if (retValue.getErrors() == null || retValue.getErrors().isEmpty()) {
            return retValue;
        }
        LOG.debug("RpcError message", retValue.getErrors());
        throw new RestconfDocumentedException("RpcError message", null, retValue.getErrors());
    } catch (final InterruptedException e) {
        final String errMsg = "The operation was interrupted while executing and did not complete.";
        LOG.debug("Rpc Interrupt - " + errMsg, e);
        throw new RestconfDocumentedException(errMsg, ErrorType.RPC, ErrorTag.PARTIAL_OPERATION);
    } catch (final ExecutionException e) {
        LOG.debug("Execution RpcError: ", e);
        Throwable cause = e.getCause();
        if (cause != null) {
            while (cause.getCause() != null) {
                cause = cause.getCause();
            }

            if (cause instanceof IllegalArgumentException) {
                throw new RestconfDocumentedException(cause.getMessage(), ErrorType.PROTOCOL,
                        ErrorTag.INVALID_VALUE);
            }
            throw new RestconfDocumentedException(
                    "The operation encountered an unexpected error while executing.", cause);
        }
        throw new RestconfDocumentedException("The operation encountered an unexpected error while executing.",
                e);
    } catch (final CancellationException e) {
        final String errMsg = "The operation was cancelled while executing.";
        LOG.debug("Cancel RpcExecution: " + errMsg, e);
        throw new RestconfDocumentedException(errMsg, ErrorType.RPC, ErrorTag.PARTIAL_OPERATION);
    }
}

From source file:org.signserver.client.cli.defaultimpl.SignDataGroupsCommand.java
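SignServer calls getCause() on a SOAPFaultException to detect a wrapped AuthorizationRequiredException; the InterruptedException from Worker.join() is merely reported.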

/**
 * Execute the signing operation.
 */
public final void run() {
    try {
        final int NUM_WORKERS = 1;
        Worker[] workers = new Worker[NUM_WORKERS];
        PrintStream outputStream = getOutputStream();
        if (outputStream == null) {
            outputStream = System.out;
        }
        for (int i = 0; i < NUM_WORKERS; i++) {
            workers[i] = new Worker("Worker " + i, createSigner(), dataGroups, encoding, repeat, outputStream);
        }

        // Start workers
        for (Worker worker : workers) {
            worker.start();
        }

        // Wait for worker
        for (Worker worker : workers) {
            System.err.println("Waiting for " + worker);
            try {
                worker.join();
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
                System.err.println("Interrupted!");
            }
        }

        System.err.println("Done");

    } catch (SOAPFaultException ex) {
        if (ex.getCause() instanceof AuthorizationRequiredException) {
            final AuthorizationRequiredException authEx = (AuthorizationRequiredException) ex.getCause();
            LOG.error("Authorization required: " + authEx.getMessage());
        }
        LOG.error(ex);
    } catch (IOException ex) {
        LOG.error(ex);
    }
}

From source file:ch.algotrader.service.ib.IBNativeReferenceDataServiceImpl.java
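A variant of the first AlgoTrader example with a configurable timeout; here the unwrapped cause is handed to a helper that rethrows it.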

private Set<ContractDetails> getContractDetailsBlocking(final Promise<List<ContractDetails>> promise) {
    try {
        int requestTimeout = this.ibConfig.getRequestTimeout();
        return new HashSet<>(promise.get(requestTimeout, TimeUnit.SECONDS));
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new ServiceException(ex);
    } catch (TimeoutException ex) {
        throw new ExternalServiceException("Service request timeout");
    } catch (ExecutionException ex) {
        throw IBNativeSupport.rethrow(ex.getCause());
    }
}

From source file:com.mirth.connect.plugins.datatypes.delimited.DelimitedBatchAdaptor.java
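Mirth Connect's batch-message reader; near the end of the method, the cause of a JavaScriptExecutorException is logged, while InterruptedException is deliberately rethrown to preserve cancellation.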

/**
 * Finds the next message in the input stream and returns it.
 *
 * @param in
 *            The input stream (it's a BufferedReader, because operations on it require
 *            in.mark()).
 * @param skipHeader
 *            Pass true to skip the configured number of header rows, otherwise false.
 * @return The next message, or null if there are no more messages.
 * @throws IOException
 * @throws InterruptedException
 */
private String getMessage(final BufferedReader in, final boolean skipHeader) throws Exception {
    String recDelim = delimitedReader.getRecordDelimiter();
    int ch;
    String lookAhead = "";
    // If skipping the header, and the option is configured, consume all the skip records,
    // including the record delimiters
    if (skipHeader && batchProperties.getBatchSkipRecords() > 0) {
        for (int i = 0; i < batchProperties.getBatchSkipRecords(); i++) {
            do {
                ch = delimitedReader.getChar(in, null);
                lookAhead = delimitedReader.peekChars(in, recDelim.length());
            } while (ch != -1 && !lookAhead.equals(recDelim));

            if (lookAhead.equals(recDelim)) {
                for (int j = 0; j < recDelim.length(); j++) {
                    ch = delimitedReader.getChar(in, null);
                }
            }
        }
    }

    StringBuilder message = new StringBuilder();
    SplitType splitOption = batchProperties.getSplitType();

    if (splitOption == SplitType.Record) {
        // Each record is treated as a message
        delimitedReader.getRecord(in, message);
    } else if (splitOption == SplitType.Delimiter) {
        if (StringUtils.isEmpty(batchProperties.getBatchMessageDelimiter())) {
            throw new BatchMessageException("No batch message delimiter was set.");
        }

        if (batchMessageDelimiter == null) {
            batchMessageDelimiter = StringUtil.unescape(batchProperties.getBatchMessageDelimiter());
        }
        // All records until the message delimiter (or end of input) is a
        // message.
        for (;;) {
            // Get the next record
            ArrayList<String> record = delimitedReader.getRecord(in, message);

            if (record == null) {
                break;
            }

            // If the next sequence of characters is the message delimiter
            lookAhead = delimitedReader.peekChars(in, batchMessageDelimiter.length());
            if (lookAhead.equals(batchMessageDelimiter)) {

                // Consume it.
                for (int i = 0; i < batchMessageDelimiter.length(); i++) {
                    ch = delimitedReader.getChar(in, null);
                }

                // Append it if it is being included
                if (batchProperties.isBatchMessageDelimiterIncluded()) {
                    message.append(batchMessageDelimiter);
                }

                break;
            }
        }
    } else if (splitOption == SplitType.Grouping_Column) {
        if (StringUtils.isEmpty(batchProperties.getBatchGroupingColumn())) {
            throw new BatchMessageException("No batch grouping column was set.");
        }

        // Each message is a collection of records with the same value in
        // the specified column.
        // The end of the current message occurs when a transition in the
        // value of the specified
        // column occurs.

        // Prime the pump: get the first record, and save the grouping
        // column.
        ArrayList<String> record = delimitedReader.getRecord(in, message);

        if (record != null) {

            if (groupingColumnIndex == null) {
                updateGroupingColumnIndex(batchProperties.getBatchGroupingColumn(),
                        serializationProperties.getColumnNames());
            }

            String lastColumnValue = record.get(groupingColumnIndex);

            // Read records until the value in the grouping column changes
            // or there are no more records
            for (;;) {

                StringBuilder recordText = new StringBuilder();
                record = delimitedReader.getRecord(in, recordText);

                if (record == null) {
                    break;
                }

                if (!record.get(groupingColumnIndex).equals(lastColumnValue)) {
                    delimitedReader.ungetRecord(record, recordText.toString());
                    break;
                }

                message.append(recordText.toString());
            }
        }
    } else if (splitOption == SplitType.JavaScript) {
        if (StringUtils.isEmpty(batchProperties.getBatchScript())) {
            throw new BatchMessageException("No batch script was set.");
        }

        try {
            final int batchSkipRecords = batchProperties.getBatchSkipRecords();
            final String batchScriptId = ScriptController.getScriptId(ScriptController.BATCH_SCRIPT_KEY,
                    sourceConnector.getChannelId());

            MirthContextFactory contextFactory = contextFactoryController
                    .getContextFactory(sourceConnector.getChannel().getResourceIds());
            if (!factory.getContextFactoryId().equals(contextFactory.getId())) {
                synchronized (factory) {
                    contextFactory = contextFactoryController
                            .getContextFactory(sourceConnector.getChannel().getResourceIds());
                    if (!factory.getContextFactoryId().equals(contextFactory.getId())) {
                        JavaScriptUtil.recompileGeneratedScript(contextFactory, batchScriptId);
                        factory.setContextFactoryId(contextFactory.getId());
                    }
                }
            }

            String result = JavaScriptUtil.execute(
                    new JavaScriptTask<String>(contextFactory, "Delimited Batch Adaptor", sourceConnector) {
                        @Override
                        public String doCall() throws Exception {
                            Script compiledScript = CompiledScriptCache.getInstance()
                                    .getCompiledScript(batchScriptId);

                            if (compiledScript == null) {
                                logger.error("Batch script could not be found in cache");
                                return null;
                            } else {
                                Logger scriptLogger = Logger
                                        .getLogger(ScriptController.BATCH_SCRIPT_KEY.toLowerCase());

                                try {
                                    Scriptable scope = JavaScriptScopeUtil.getBatchProcessorScope(
                                            getContextFactory(), scriptLogger, sourceConnector.getChannelId(),
                                            sourceConnector.getChannel().getName(), getScopeObjects(in,
                                                    serializationProperties, skipHeader, batchSkipRecords));
                                    return (String) Context.jsToJava(executeScript(compiledScript, scope),
                                            String.class);
                                } finally {
                                    Context.exit();
                                }
                            }
                        }
                    });

            if (result != null) {
                message.append(result);
            }
        } catch (InterruptedException e) {
            throw e;
        } catch (JavaScriptExecutorException e) {
            logger.error(e.getCause());
        } catch (Throwable e) {
            logger.error(e);
        }
    } else {
        throw new BatchMessageException("No valid batch splitting method configured");
    }

    String result = message.toString();
    if (result.length() == 0) {
        return null;
    } else {
        return result;
    }
}

From source file:org.apache.hadoop.registry.server.services.RegistryAdminService.java
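Hadoop converts the InterruptedException into an InterruptedIOException via initCause(), and rethrows an ExecutionException's cause directly when it is already an IOException.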

/**
 * Create the home path for a user if it does not exist.
 *
 * This uses {@link #initUserRegistryAsync(String)} and then waits for the
 * result ... the code path is the same as the async operation; this just
 * picks up and relays/converts exceptions
 * @param username username
 * @return the path created
 * @throws IOException any failure
 *
 */
public String initUserRegistry(final String username) throws IOException {

    try {
        Future<Boolean> future = initUserRegistryAsync(username);
        future.get();
    } catch (InterruptedException e) {
        throw (InterruptedIOException) (new InterruptedIOException(e.toString()).initCause(e));
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
            throw (IOException) (cause);
        } else {
            throw new IOException(cause.toString(), cause);
        }
    }

    return homeDir(username);
}

From source file:org.apache.hadoop.hbase.client.crosssite.CrossSiteClientScanner.java
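An HBase cross-site scanner gathers iterators from several clusters; each ExecutionException's cause is wrapped in a new IOException, and only the last exception seen is thrown after all futures are drained.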

private void initialize() throws IOException {
    int count = this.clusterStartStopKeyPairs.size();
    List<Future<ScannerIterator>> futures = new ArrayList<Future<ScannerIterator>>();
    for (int i = count - 1; i >= 0; i--) {
        Callable<ScannerIterator> callable = createCallable(clusterStartStopKeyPairs.get(i), this.tableName,
                ignoreUnavailableClusters);
        if (callable != null) {
            futures.add(pool.submit(callable));
        }
    }

    IOException exception = null;
    for (Future<ScannerIterator> future : futures) {
        try {
            ScannerIterator iter = future.get();
            if (iter != null) {
                clusterScannerIterators.add(iter);
            }
        } catch (InterruptedException e) {
            exception = new IOException("Interrupted", e);
        } catch (ExecutionException e) {
            exception = new IOException(e.getCause());
        }
    }
    if (exception != null) {
        close();
        // just throw the last exception
        throw exception;
    }
    if (clusterScannerIterators.size() == 0) {
        // add an empty scanner iterator
        LOG.debug("The ScannerIterator is empty, the EmptyScannerIterator is used instead");
        clusterScannerIterators.add(new EmptyScannerIterator());
    }
    this.resultIterator = new MergeSortIterator<Result>(clusterScannerIterators, new ResultComparator());
}