Example usage for java.util.concurrent CompletionException CompletionException

Introduction

On this page you can find examples of how the constructor java.util.concurrent.CompletionException(Throwable cause) is used in real-world projects.

Prototype

public CompletionException(Throwable cause) 

Document

Constructs a CompletionException with the specified cause.
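
For orientation before the project examples below, here is a minimal, self-contained sketch of the typical pattern (hypothetical code, not taken from any of the projects under Usage): a checked exception raised inside an asynchronous task is wrapped in a CompletionException so it can cross the lambda boundary, and the caller recovers the original cause from the failed future.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class CompletionExceptionExample {

    public static void main(String[] args) {
        CompletableFuture<String> future = CompletableFuture.supplyAsync(() -> {
            try {
                return load();
            } catch (IOException e) {
                // Supplier lambdas cannot throw checked exceptions, so wrap
                // the cause in the unchecked CompletionException instead
                throw new CompletionException(e);
            }
        });

        try {
            future.join();
        } catch (CompletionException e) {
            // join() rethrows the CompletionException; getCause() recovers
            // the original checked exception
            System.out.println("failed: " + e.getCause());
        }
    }

    private static String load() throws IOException {
        throw new IOException("resource not available");
    }
}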

Usage

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

private CompletableFuture<TxnStatus> getCompletedTxnStatus(UUID txId) {
    return getCompletedTx(txId).handle((ok, ex) -> {
        if (ex != null && ExceptionHelpers.getRealException(ex) instanceof DataNotFoundException) {
            return TxnStatus.UNKNOWN;
        } else if (ex != null) {
            throw new CompletionException(ex);
        }
        return CompletedTxnRecord.parse(ok.getData()).getCompletionStatus();
    });
}
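
The BiFunction passed to handle() cannot throw checked exceptions, so the original failure is rethrown wrapped in the unchecked CompletionException: the future returned by handle() then completes exceptionally while the original Throwable stays reachable via getCause(). The expected DataNotFoundException case, by contrast, is mapped to a regular result, TxnStatus.UNKNOWN.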

From source file:org.apache.flink.runtime.blob.BlobCacheGetTest.java

/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 * @param cacheAccessesHAStore
 *       whether the cache has access to the {@link BlobServer}'s HA store or not
 */
private void testConcurrentGetOperations(final JobID jobId, final BlobKey.BlobType blobType,
        final boolean cacheAccessesHAStore) throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    final byte[] data = { 1, 2, 3, 4, 99, 42 };

    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (final BlobServer server = new BlobServer(config, blobStoreServer);
            final BlobCacheService cache = new BlobCacheService(config,
                    cacheAccessesHAStore ? blobStoreServer : blobStoreCache,
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA store!)
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation = CompletableFuture.supplyAsync(() -> {
                try {
                    File file = get(cache, jobId, blobKey);
                    // check that we have read the right data
                    validateGetAndClose(new FileInputStream(file), data);
                    return file;
                } catch (IOException e) {
                    throw new CompletionException(
                            new FlinkException("Could not read blob for key " + blobKey + '.', e));
                }
            }, executor);

            getOperations.add(getOperation);
        }

        FutureUtils.ConjunctFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);

        if (blobType == PERMANENT_BLOB) {
            // wait until all operations have completed and check that no exception was thrown
            filesFuture.get();
        } else {
            // wait for all futures to complete (do not abort on expected exceptions) and check
            // that at least one succeeded
            int completedSuccessfully = 0;
            for (CompletableFuture<File> op : getOperations) {
                try {
                    op.get();
                    ++completedSuccessfully;
                } catch (Throwable t) {
                    // transient BLOBs get deleted upon first access; only one request will
                    // succeed while all others fail with an IOException caused by a
                    // FileNotFoundException, which is expected here and can be ignored
                    if (!(ExceptionUtils.getRootCause(t) instanceof FileNotFoundException)) {
                        // anything else is an unexpected failure -> rethrow
                        org.apache.flink.util.ExceptionUtils.rethrowIOException(t);
                    }
                }
            }
            // multiple clients may have accessed the BLOB successfully before it was
            // deleted, but always at least one:
            assertThat(completedSuccessfully, greaterThanOrEqualTo(1));
        }
    } finally {
        executor.shutdownNow();
    }
}

From source file:org.apache.flink.runtime.blob.BlobCachePutTest.java

/**
 * [FLINK-6020]
 * Tests that concurrent put operations will only upload the file once to the {@link BlobStore}
 * and that the files are not corrupt at any time.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);

    int concurrentPutOperations = 2;
    int dataSize = 1024;

    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];

    final List<Path> jars;
    if (blobType == PERMANENT_BLOB) {
        // implement via JAR file upload instead:
        File tmpFile = temporaryFolder.newFile();
        FileUtils.writeByteArrayToFile(tmpFile, data);
        jars = Collections.singletonList(new Path(tmpFile.getAbsolutePath()));
    } else {
        jars = null;
    }

    Collection<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);

    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);

    try (final BlobServer server = new BlobServer(config, blobStoreServer);
            final BlobCacheService cache = new BlobCacheService(config, blobStoreCache,
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // for highAvailability
        final InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
        // uploading HA BLOBs works on BlobServer only (and, for now, via the BlobClient)

        for (int i = 0; i < concurrentPutOperations; i++) {
            final Supplier<BlobKey> callable;
            if (blobType == PERMANENT_BLOB) {
                // cannot use a blocking stream here (upload only possible via files)
                callable = () -> {
                    try {
                        List<PermanentBlobKey> keys = BlobClient.uploadFiles(serverAddress, config, jobId,
                                jars);
                        assertEquals(1, keys.size());
                        BlobKey uploadedKey = keys.get(0);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };

            } else {
                callable = () -> {
                    try {
                        BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                        BlobKey uploadedKey = put(cache, jobId, inputStream, blobType);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };
            }
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(callable, executor);

            allFutures.add(putFuture);
        }

        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);

        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();

        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();

        assertTrue(blobKeyIterator.hasNext());

        BlobKey blobKey = blobKeyIterator.next();

        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            // check for unique BlobKey, but should have same hash
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }

        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);

        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStoreServer, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put operations should
            // work and not corrupt files
            verify(blobStoreServer, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }
        // caches must not access the blob store (they are not allowed to write there)
        verify(blobStoreCache, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
    } finally {
        executor.shutdownNow();
    }
}

From source file:org.apache.flink.runtime.blob.BlobServerGetTest.java

/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testConcurrentGetOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStore = mock(BlobStore.class);

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    final byte[] data = { 1, 2, 3, 4, 99, 42 };

    doAnswer(invocation -> {
        File targetFile = (File) invocation.getArguments()[2];
        FileUtils.writeByteArrayToFile(targetFile, data);
        return null;
    }).when(blobStore).get(any(JobID.class), any(BlobKey.class), any(File.class));

    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (final BlobServer server = new BlobServer(config, blobStore)) {

        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA store!)
        if (blobType == PERMANENT_BLOB) {
            // remove local copy so that a transfer from HA store takes place
            assertTrue(server.getStorageLocation(jobId, blobKey).delete());
        }
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation = CompletableFuture.supplyAsync(() -> {
                try {
                    File file = get(server, jobId, blobKey);
                    // check that we have read the right data
                    validateGetAndClose(new FileInputStream(file), data);
                    return file;
                } catch (IOException e) {
                    throw new CompletionException(
                            new FlinkException("Could not read blob for key " + blobKey + '.', e));
                }
            }, executor);

            getOperations.add(getOperation);
        }

        CompletableFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);
        filesFuture.get();
    } finally {
        executor.shutdownNow();
    }
}

From source file:org.apache.flink.runtime.blob.BlobServerPutTest.java

/**
 * [FLINK-6020]
 * Tests that concurrent put operations will only upload the file once to the {@link BlobStore}
 * and that the files are not corrupt at any time.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    BlobStore blobStore = mock(BlobStore.class);
    int concurrentPutOperations = 2;
    int dataSize = 1024;

    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];

    ArrayList<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);

    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);

    try (final BlobServer server = new BlobServer(config, blobStore)) {

        server.start();

        for (int i = 0; i < concurrentPutOperations; i++) {
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(() -> {
                try {
                    BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                    BlobKey uploadedKey = put(server, jobId, inputStream, blobType);
                    // check the uploaded file's contents (concurrently)
                    verifyContents(server, jobId, uploadedKey, data);
                    return uploadedKey;
                } catch (IOException e) {
                    throw new CompletionException(new FlinkException("Could not upload blob.", e));
                }
            }, executor);

            allFutures.add(putFuture);
        }

        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);

        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();

        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();

        assertTrue(blobKeyIterator.hasNext());

        BlobKey blobKey = blobKeyIterator.next();

        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }

        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);

        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStore, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put operations should
            // work and not corrupt files
            verify(blobStore, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }
    } finally {
        executor.shutdownNow();
    }
}

From source file:org.apache.james.jmap.memory.access.MemoryAccessTokenRepository.java

@Override
public CompletableFuture<String> getUsernameFromToken(AccessToken accessToken) throws InvalidAccessToken {
    Preconditions.checkNotNull(accessToken);
    synchronized (tokensExpirationDates) {
        return CompletableFuture.completedFuture(
                Optional.ofNullable(tokensExpirationDates.get(accessToken)).<CompletionException>orElseThrow(
                        () -> new CompletionException(new InvalidAccessToken(accessToken))));
    }
}
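
Note that here the CompletionException is thrown synchronously from orElseThrow, before any future is created: a caller passing an unknown token sees the exception as a direct throw from getUsernameFromToken rather than as an exceptionally completed CompletableFuture.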

From source file:org.apache.sling.commons.messaging.mail.internal.SimpleMailService.java

private MailResult sendMail(final String message, final String recipient, final Map data,
        final MailBuilder mailBuilder) {
    try {
        final Email email = mailBuilder.build(message, recipient, data);
        final String messageId = email.send();
        logger.info("mail '{}' sent", messageId);
        final byte[] bytes = MailUtil.toByteArray(email);
        return new MailResult(bytes);
    } catch (EmailException | MessagingException | IOException e) {
        throw new CompletionException(e);
    }
}
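
Wrapping the checked EmailException, MessagingException, and IOException in a CompletionException makes sendMail usable from lambdas that cannot declare checked exceptions, presumably so the service can invoke it asynchronously (e.g. via CompletableFuture.supplyAsync) while still propagating the original failure as the cause.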

From source file:org.eclipse.winery.accountability.storage.swarm.SwarmProviderTest.java

@Test
public void testStorageAndRetrieval() throws ExecutionException, InterruptedException {

    final String dataToStore = "This a string intended for testing!";
    ImmutableStorageProvider swarm = ImmutableStorageProviderFactory
            .getStorageProvider(ImmutableStorageProviderFactory.AvailableImmutableStorages.TEST, null);
    assertNotNull(swarm);
    swarm.store(new ByteArrayInputStream(dataToStore.getBytes(StandardCharsets.UTF_8))).thenCompose((hash) -> {
        LOGGER.debug("retrieved hash is: {}", hash);
        return swarm.retrieve(hash);
    }).thenAccept((bytes) -> {
        try {
            String receivedMsg = IOUtils.toString(bytes, StandardCharsets.UTF_8);
            LOGGER.debug("retrieved msg is: {}", receivedMsg);
            assertEquals(dataToStore, receivedMsg);
        } catch (IOException e) {
            throw new CompletionException(e);
        }
    }).get();
}
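
Should retrieval or reading fail, the CompletionException thrown inside the thenAccept consumer completes the composed future exceptionally, and the concluding get() then rethrows the failure as an ExecutionException, failing the test instead of swallowing the IOException.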

From source file:org.polymap.p4.layer.FeatureLayer.java

/**
 * Waits for the {@link FeatureLayer} of the given {@link ILayer}.
 * <p/>
 * Avoid calling just {@link CompletableFuture#get()} as this may block the
 * calling (UI) thread. Instead, register callbacks that handle the result
 * asynchronously.
 * <p/>
 * The callbacks are called from within an {@link UIJob}. Use
 * {@link UIThreadExecutor} to do something in the display thread.
 * <p/>
 * <b>Example usage:</b>
 * <pre>
 *      FeatureLayer.of( layer ).thenAccept( featureLayer -> {
 *          if (featureLayer.isPresent()) {
 *              ...
 *          }
 *          else {
 *              ...
 *          }
 *      })
 *      .exceptionally( e -> {
 *          StatusDispatcher.handleError( "", e );
 *          return null;
 *      });
 * </pre>
 * 
 * @param layer
 */
public static CompletableFuture<Optional<FeatureLayer>> of(ILayer layer) {
    return CompletableFuture.supplyAsync(() -> {
        SessionHolder session = SessionHolder.instance(SessionHolder.class);
        FeatureLayer result = session.instances.computeIfAbsent((String) layer.id(), key -> {
            try {
                IProgressMonitor monitor = UIJob.monitorOfThread();
                return new FeatureLayer(layer).doConnectLayer(monitor);
            } catch (Exception e) {
                throw new CompletionException(e);
            }
        });
        return result.isValid() ? Optional.of(result) : Optional.empty();
    }, JobExecutor.instance());
}

From source file:org.polymap.p4.layer.RasterLayer.java

/**
 * Waits for the {@link RasterLayer} of the given {@link ILayer}.
 * <p/>
 * Avoid calling just {@link CompletableFuture#get()} as this may block the
 * calling (UI) thread. Instead, register callbacks that handle the result
 * asynchronously.
 * <p/>
 * The callbacks are called from within an {@link UIJob}. Use
 * {@link UIThreadExecutor} to do something in the display thread.
 * <p/>
 * <b>Example usage:</b>
 * <pre>
 *      RasterLayer.of( layer ).thenAccept( rl -> {
 *          if (rl.isPresent()) {
 *              ...
 *          }
 *          else {
 *              ...
 *          }
 *      })
 *      .exceptionally( e -> {
 *          StatusDispatcher.handleError( "", e );
 *          return null;
 *      });
 * </pre>
 * 
 * @param layer
 */
public static CompletableFuture<Optional<RasterLayer>> of(ILayer layer) {
    return CompletableFuture.supplyAsync(() -> {
        SessionHolder session = SessionHolder.instance(SessionHolder.class);
        RasterLayer result = session.instances.computeIfAbsent((String) layer.id(), key -> {
            try {
                IProgressMonitor monitor = UIJob.monitorOfThread();
                return new RasterLayer(layer).doConnectLayer(monitor);
            } catch (Exception e) {
                throw new CompletionException(e);
            }
        });
        return result.isValid() ? Optional.of(result) : Optional.empty();
    }, JobExecutor.instance());
}