Example usage for java.lang.InterruptedException.getCause()

Introduction

On this page you can find example usage for java.lang.InterruptedException.getCause().

Prototype

public synchronized Throwable getCause() 

Document

Returns the cause of this throwable, or null if the cause is nonexistent or unknown. InterruptedException inherits getCause() from Throwable, so it behaves the same here as on any other throwable.
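
Before the real-world examples, here is a minimal, self-contained sketch of the pattern most of them share: a blocking Future.get() is wrapped in a try/catch, the InterruptedException branch checks getCause() (usually null, since InterruptedException is rarely constructed with a cause) and restores the interrupt flag, and the ExecutionException branch unwraps the task's real failure with getCause(). The GetCauseSketch class name and the failing task body are invented for illustration only.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseSketch {

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                // Hypothetical task that fails; this exception becomes the
                // cause carried by the ExecutionException caught below.
                throw new IllegalStateException("backend unavailable");
            }
        });
        try {
            System.out.println(future.get());
        } catch (InterruptedException e) {
            // getCause() is typically null here; report it only if present.
            Throwable cause = e.getCause();
            System.err.println("Interrupted" + (cause != null ? ", caused by " + cause : ""));
            // Restore the interrupt flag so callers can still observe the interruption.
            Thread.currentThread().interrupt();
        } catch (ExecutionException e) {
            // The task's own exception is exposed through getCause().
            Throwable cause = e.getCause();
            System.err.println("Task failed: " + cause);
        } finally {
            executor.shutdown();
        }
    }
}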

Usage

From source file: org.openstack.burrow.backend.http.AsyncHttp.java

/**
 * Executes an UpdateMessage request by calling .get() on the executeAsync
 * response.
 * 
 * @param request The UpdateMessage request object to execute
 * @return A Message object
 * @throws CommandException Thrown if the request cannot be processed
 * @throws ProtocolException Thrown if an error occurs while executing the
 *           Http request
 */
@Override
public Message execute(UpdateMessage request) throws CommandException, ProtocolException {
    try {
        return executeAsync(request).get();
    } catch (InterruptedException e) {
        throw new BurrowRuntimeException("InterruptedException executing HTTP request", e);
    } catch (ExecutionException e) {
        Throwable t = e.getCause();
        if (t instanceof CommandException)
            throw (CommandException) t;
        if (t instanceof ProtocolException)
            throw (ProtocolException) t;
        throw new BurrowRuntimeException("ExecutionException executing HTTP request", e);
    }
}

From source file: org.openstack.burrow.backend.http.AsyncHttp.java

/**
 * Executes an UpdateMessages request by calling .get() on the executeAsync
 * response.
 * 
 * @param request The UpdateMessages request object to execute
 * @return A List of Message objects
 * @throws CommandException Thrown if the request cannot be processed
 * @throws ProtocolException Thrown if an error occurs while executing the
 *           Http request
 */
@Override
public List<Message> execute(UpdateMessages request) throws CommandException, ProtocolException {
    try {
        return executeAsync(request).get();
    } catch (InterruptedException e) {
        throw new BurrowRuntimeException("InterruptedException executing HTTP request", e);
    } catch (ExecutionException e) {
        Throwable t = e.getCause();
        if (t instanceof CommandException)
            throw (CommandException) t;
        if (t instanceof ProtocolException)
            throw (ProtocolException) t;
        throw new BurrowRuntimeException("ExecutionException executing HTTP request", e);
    }
}

From source file: org.apache.jackrabbit.core.query.lucene.TextExtractorJob.java

/**
 * Returns the reader with the extracted text from the input stream passed
 * to the constructor of this <code>TextExtractorJob</code>. The caller of
 * this method is responsible for closing the returned reader. Returns
 * <code>null</code> if a <code>timeout</code> occurs while waiting for the
 * text extractor to get the reader.
 *
 * @return the Reader with the extracted text. Returns <code>null</code> if
 *         a timeout or an exception occurred extracting the text.
 */
public Reader getReader(long timeout) {
    Reader reader = null;
    try {
        reader = (Reader) timedGet(timeout);
    } catch (InterruptedException e) {
        // also covers TimeoutException
        // text not extracted within timeout or interrupted
        if (timeout > 0) {
            log.debug("Text extraction for {} timed out (>{}ms).", type, new Long(timeout));
            timedOut = true;
        }
    } catch (InvocationTargetException e) {
        // extraction failed
        log.warn("Exception while indexing binary property: " + e.getCause());
        log.debug("Dump: ", e.getCause());
    }
    return reader;
}

From source file: com.cloudera.oryx.ml.serving.als.model.ALSServingModel.java

public List<Pair<String, Double>> topN(final DoubleFunction<float[]> scoreFn, final int howMany,
        final Predicate<String> allowedPredicate) {

    List<Callable<Iterable<Pair<String, Double>>>> tasks = new ArrayList<>(Y.length);
    for (int partition = 0; partition < Y.length; partition++) {
        final int thePartition = partition;
        tasks.add(new LoggingCallable<Iterable<Pair<String, Double>>>() {
            @Override
            public Iterable<Pair<String, Double>> doCall() {
                Queue<Pair<String, Double>> topN = new PriorityQueue<>(howMany + 1,
                        PairComparators.<Double>bySecond());
                TopNConsumer topNProc = new TopNConsumer(topN, howMany, scoreFn, allowedPredicate);

                Lock lock = yLocks[thePartition].readLock();
                lock.lock();
                try {
                    Y[thePartition].forEach(topNProc);
                } finally {
                    lock.unlock();
                }
                // Ordering and excess items don't matter; will be merged and finally sorted later
                return topN;
            }
        });
    }

    List<Iterable<Pair<String, Double>>> iterables = new ArrayList<>();
    if (Y.length >= 2) {
        try {
            for (Future<Iterable<Pair<String, Double>>> future : executor.invokeAll(tasks)) {
                iterables.add(future.get());
            }
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
        } catch (ExecutionException e) {
            throw new IllegalStateException(e.getCause());
        }
    } else {
        try {
            iterables.add(tasks.get(0).call());
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
    }

    return Ordering.from(PairComparators.<Double>bySecond()).greatestOf(Iterables.concat(iterables), howMany);
}

From source file: ch.cyberduck.core.b2.B2LargeUploadService.java

@Override
public BaseB2Response upload(final Path file, final Local local, final BandwidthThrottle throttle,
        final StreamListener listener, final TransferStatus status, final ConnectionCallback callback)
        throws BackgroundException {
    final DefaultThreadPool pool = new DefaultThreadPool("largeupload", concurrency);
    try {
        final String fileid;
        // Get the results of the uploads in the order they were submitted
        // this is important for building the manifest, and is not a problem in terms of performance
        // because we should only continue when all segments have uploaded successfully
        final List<B2UploadPartResponse> completed = new ArrayList<B2UploadPartResponse>();
        final Map<String, String> fileinfo = new HashMap<>(status.getMetadata());
        final Checksum checksum = status.getChecksum();
        if (Checksum.NONE != checksum) {
            switch (checksum.algorithm) {
            case sha1:
                fileinfo.put(X_BZ_INFO_LARGE_FILE_SHA1, status.getChecksum().hash);
                break;
            }
        }
        if (null != status.getTimestamp()) {
            fileinfo.put(X_BZ_INFO_SRC_LAST_MODIFIED_MILLIS, String.valueOf(status.getTimestamp()));
        }
        if (status.isAppend() || status.isRetry()) {
            // Add already completed parts
            final B2LargeUploadPartService partService = new B2LargeUploadPartService(session);
            final List<B2FileInfoResponse> uploads = partService.find(file);
            if (uploads.isEmpty()) {
                fileid = session.getClient()
                        .startLargeFileUpload(
                                new B2FileidProvider(session).getFileid(containerService.getContainer(file),
                                        new DisabledListProgressListener()),
                                containerService.getKey(file), status.getMime(), fileinfo)
                        .getFileId();
            } else {
                fileid = uploads.iterator().next().getFileId();
                completed.addAll(partService.list(fileid));
            }
        } else {
            fileid = session.getClient()
                    .startLargeFileUpload(
                            new B2FileidProvider(session).getFileid(containerService.getContainer(file),
                                    new DisabledListProgressListener()),
                            containerService.getKey(file), status.getMime(), fileinfo)
                    .getFileId();
        }
        // Submit file segments for concurrent upload
        final List<Future<B2UploadPartResponse>> parts = new ArrayList<Future<B2UploadPartResponse>>();
        long remaining = status.getLength();
        long offset = 0;
        for (int partNumber = 1; remaining > 0; partNumber++) {
            boolean skip = false;
            if (status.isAppend() || status.isRetry()) {
                if (log.isInfoEnabled()) {
                    log.info(String.format("Determine if part number %d can be skipped", partNumber));
                }
                for (B2UploadPartResponse c : completed) {
                    if (c.getPartNumber().equals(partNumber)) {
                        if (log.isInfoEnabled()) {
                            log.info(String.format("Skip completed part number %d", partNumber));
                        }
                        skip = true;
                        offset += c.getContentLength();
                        break;
                    }
                }
            }
            if (!skip) {
                final Long length = Math.min(Math.max(
                        ((status.getLength() + status.getOffset()) / B2LargeUploadService.MAXIMUM_UPLOAD_PARTS),
                        partSize), remaining);
                // Submit to queue
                parts.add(this.submit(pool, file, local, throttle, listener, status, partNumber, offset, length,
                        callback));
                if (log.isDebugEnabled()) {
                    log.debug(String.format("Part %s submitted with size %d and offset %d", partNumber, length,
                            offset));
                }
                remaining -= length;
                offset += length;
            }
        }
        try {
            for (Future<B2UploadPartResponse> f : parts) {
                completed.add(f.get());
            }
        } catch (InterruptedException e) {
            log.error("Part upload failed with interrupt failure");
            status.setCanceled();
            throw new ConnectionCanceledException(e);
        } catch (ExecutionException e) {
            log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
            if (e.getCause() instanceof BackgroundException) {
                throw (BackgroundException) e.getCause();
            }
            throw new DefaultExceptionMappingService().map(e.getCause());
        } finally {
            pool.shutdown(false);
        }
        completed.sort(new Comparator<B2UploadPartResponse>() {
            @Override
            public int compare(final B2UploadPartResponse o1, final B2UploadPartResponse o2) {
                return o1.getPartNumber().compareTo(o2.getPartNumber());
            }
        });
        final List<String> checksums = new ArrayList<String>();
        for (B2UploadPartResponse part : completed) {
            checksums.add(part.getContentSha1());
        }
        final B2FinishLargeFileResponse response = session.getClient().finishLargeFileUpload(fileid,
                checksums.toArray(new String[checksums.size()]));
        if (log.isInfoEnabled()) {
            log.info(String.format("Finished large file upload %s with %d parts", file, completed.size()));
        }
        // Mark parent status as complete
        status.setComplete();
        return response;
    } catch (B2ApiException e) {
        throw new B2ExceptionMappingService().map("Upload {0} failed", e, file);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
}

From source file: org.optaplanner.benchmark.impl.DefaultPlannerBenchmark.java

protected void runSingleBenchmarks() {
    Map<SingleBenchmark, Future<SingleBenchmark>> futureMap = new HashMap<SingleBenchmark, Future<SingleBenchmark>>();
    for (ProblemBenchmark problemBenchmark : unifiedProblemBenchmarkList) {
        for (SingleBenchmark singleBenchmark : problemBenchmark.getSingleBenchmarkList()) {
            Future<SingleBenchmark> future = executorService.submit(singleBenchmark);
            futureMap.put(singleBenchmark, future);
        }
    }
    // wait for the benchmarks to complete
    for (Map.Entry<SingleBenchmark, Future<SingleBenchmark>> futureEntry : futureMap.entrySet()) {
        SingleBenchmark singleBenchmark = futureEntry.getKey();
        Future<SingleBenchmark> future = futureEntry.getValue();
        Throwable failureThrowable = null;
        try {
            // Explicitly returning it in the Callable guarantees memory visibility
            singleBenchmark = future.get();
            // TODO WORKAROUND Remove when PLANNER-46 is fixed.
            if (singleBenchmark.getScore() == null) {
                throw new IllegalStateException("Score is null. TODO fix PLANNER-46.");
            }
        } catch (InterruptedException e) {
            logger.error("The singleBenchmark (" + singleBenchmark.getName() + ") was interrupted.", e);
            failureThrowable = e;
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            logger.error("The singleBenchmark (" + singleBenchmark.getName() + ") failed.", cause);
            failureThrowable = cause;
        } catch (IllegalStateException e) {
            // TODO WORKAROUND Remove when PLANNER-46 is fixed.
            logger.error("The singleBenchmark (" + singleBenchmark.getName() + ") failed.", e);
            failureThrowable = e;
        }
        if (failureThrowable == null) {
            singleBenchmark.setSucceeded(true);
        } else {
            singleBenchmark.setSucceeded(false);
            singleBenchmark.setFailureThrowable(failureThrowable);
            failureCount++;
            if (firstFailureSingleBenchmark == null) {
                firstFailureSingleBenchmark = singleBenchmark;
            }
        }
    }
}

From source file: org.apache.nutch.searcher.FetchedSegments.java

public Summary[] getSummary(HitDetails[] details, Query query) throws IOException {
    final List<Callable<Summary>> tasks = new ArrayList<Callable<Summary>>(details.length);
    for (int i = 0; i < details.length; i++) {
        tasks.add(new SummaryTask(details[i], query));
    }

    List<Future<Summary>> summaries;
    try {
        summaries = executor.invokeAll(tasks);
    } catch (final InterruptedException e) {
        throw new RuntimeException(e);
    }

    final Summary[] results = new Summary[details.length];
    for (int i = 0; i < details.length; i++) {
        final Future<Summary> f = summaries.get(i);
        Summary summary;
        try {
            summary = f.get();
        } catch (final Exception e) {
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            }
            throw new RuntimeException(e);
        }
        results[i] = summary;
    }
    return results;
}

From source file: com.joyent.manta.client.MantaObjectOutputStream.java

@Override
public synchronized void close() throws IOException {
    this.closed.compareAndSet(false, true);

    Boolean innerIsClosed = isInnerStreamClosed(this.httpContent.getWriter());
    if (innerIsClosed != null && !innerIsClosed) {
        this.httpContent.getWriter().flush();
    }

    synchronized (this.httpContent) {
        this.httpContent.notify();
    }

    try {
        this.objectResponse = this.completed.get();
        this.objectResponse.setContentLength(bytesWritten.get());
    } catch (InterruptedException e) {
        // continue execution if interrupted
    } catch (ExecutionException e) {
        /* We wrap the cause because the stack trace for the
         * ExecutionException offers nothing useful and is just a wrapper
         * for exceptions that are thrown within a Future. */
        MantaIOException mioe = new MantaIOException(e.getCause());

        if (this.objectResponse != null) {
            final String requestId = this.objectResponse.getHeaderAsString(MantaHttpHeaders.REQUEST_ID);

            if (requestId != null) {
                mioe.addContextValue("requestId", requestId);
            }
        }

        mioe.addContextValue("path", path);

        throw mioe;
    }
}

From source file: org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer.java

private void doCheckpoint() throws InterruptedException, IOException {
    assert canceler != null;
    final long txid;
    final NameNodeFile imageType;

    // Acquire cpLock to make sure no one is modifying the name system.
    // It does not need the full namesystem write lock, since the only thing
    // that modifies namesystem on standby node is edit log replaying.
    namesystem.cpLockInterruptibly();
    try {
        assert namesystem.getEditLog()
                .isOpenForRead() : "Standby Checkpointer should only attempt a checkpoint when "
                        + "NN is in standby mode, but the edit logs are in an unexpected state";

        FSImage img = namesystem.getFSImage();

        long prevCheckpointTxId = img.getStorage().getMostRecentCheckpointTxId();
        long thisCheckpointTxId = img.getLastAppliedOrWrittenTxId();
        assert thisCheckpointTxId >= prevCheckpointTxId;
        if (thisCheckpointTxId == prevCheckpointTxId) {
            LOG.info("A checkpoint was triggered but the Standby Node has not "
                    + "received any transactions since the last checkpoint at txid " + thisCheckpointTxId
                    + ". Skipping...");
            return;
        }

        if (namesystem.isRollingUpgrade() && !namesystem.getFSImage().hasRollbackFSImage()) {
            // if we will do rolling upgrade but have not created the rollback image
            // yet, name this checkpoint as fsimage_rollback
            imageType = NameNodeFile.IMAGE_ROLLBACK;
        } else {
            imageType = NameNodeFile.IMAGE;
        }
        img.saveNamespace(namesystem, imageType, canceler);
        txid = img.getStorage().getMostRecentCheckpointTxId();
        assert txid == thisCheckpointTxId : "expected to save checkpoint at txid=" + thisCheckpointTxId
                + " but instead saved at txid=" + txid;

        // Save the legacy OIV image, if the output dir is defined.
        String outputDir = checkpointConf.getLegacyOivImageDir();
        if (outputDir != null && !outputDir.isEmpty()) {
            img.saveLegacyOIVImage(namesystem, outputDir, canceler);
        }
    } finally {
        namesystem.cpUnlock();
    }

    // Upload the saved checkpoint back to the active
    // Do this in a separate thread to avoid blocking transition to active
    // See HDFS-4816
    ExecutorService executor = Executors.newSingleThreadExecutor(uploadThreadFactory);
    Future<Void> upload = executor.submit(new Callable<Void>() {
        @Override
        public Void call() throws IOException {
            TransferFsImage.uploadImageFromStorage(activeNNAddress, conf, namesystem.getFSImage().getStorage(),
                    imageType, txid, canceler);
            return null;
        }
    });
    executor.shutdown();
    try {
        upload.get();
    } catch (InterruptedException e) {
        // The background thread may be blocked waiting in the throttler, so
        // interrupt it.
        upload.cancel(true);
        throw e;
    } catch (ExecutionException e) {
        throw new IOException("Exception during image upload: " + e.getMessage(), e.getCause());
    }
}

From source file: com.rowland.hashtrace.utility.AsyncTaskEx.java

/**
 * Creates a new asynchronous task. This constructor must be invoked on the
 * UI thread.
 */
public AsyncTaskEx() {
    mWorker = new WorkerRunnable<Params, Result>() {
        @Override
        public Result call() throws Exception {
            Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);
            return doInBackground(mParams);
        }
    };

    mFuture = new FutureTask<Result>(mWorker) {
        @SuppressWarnings("unchecked")
        @Override
        protected void done() {
            Message message;
            Result result = null;

            try {
                result = get();
            } catch (InterruptedException e) {
                android.util.Log.w(LOG_TAG, e);
            } catch (ExecutionException e) {
                throw new RuntimeException("An error occured while executing doInBackground()", e.getCause());
            } catch (CancellationException e) {
                message = sHandler.obtainMessage(MESSAGE_POST_CANCEL,
                        new AsyncTaskExResult<Result>(AsyncTaskEx.this, (Result[]) null));
                message.sendToTarget();
                return;
            } catch (Throwable t) {
                throw new RuntimeException("An error occured while executing " + "doInBackground()", t);
            }

            message = sHandler.obtainMessage(MESSAGE_POST_RESULT,
                    new AsyncTaskExResult<Result>(AsyncTaskEx.this, result));
            message.sendToTarget();
        }
    };
}