Example usage for java.util.function.Supplier.get()

List of usage examples for java.util.function.Supplier.get()

Introduction

On this page you can find example usages of java.util.function.Supplier.get().

Prototype

T get();

Document

Gets a result.
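
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the sources below) showing what get() does: it produces a value on demand, which is what makes Supplier useful for lazy and deferred computation.

import java.util.function.Supplier;

public class SupplierGetSketch {
    public static void main(String[] args) {
        // Creating the Supplier evaluates nothing; the lambda body runs only inside get().
        Supplier<String> greeting = () -> "Hello, " + System.getProperty("user.name");

        // Each call to get() re-evaluates the lambda and returns its result.
        System.out.println(greeting.get());
    }
}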

Usage

From source file:com.example.app.profile.model.ProfileDAO.java

/**
 * Get the MembershipType whose ProgrammaticIdentifier corresponds to the given ProgrammaticIdentifier.
 * If one does not exist, it is created and persisted.
 *
 * @param profileType the ProfileType that owns the MembershipType to search for
 * @param programmaticId the programmatic identifier to search for
 * @param nameSupplier a supplier for the MembershipType's name
 * @param defaultOperationsSupplier a supplier for the MembershipOperations for the MembershipType
 *
 * @return a matching MembershipType, or a newly persisted one.
 */
@Nonnull
public MembershipType getMembershipTypeOrNew(@Nonnull ProfileType profileType, @Nonnull String programmaticId,
        @Nonnull Supplier<LocalizedObjectKey> nameSupplier,
        @Nonnull Supplier<List<MembershipOperation>> defaultOperationsSupplier) {
    MembershipType mt = getMembershipType(profileType, programmaticId).orElseGet(() -> {
        ProfileType pt = _er.reattachIfNecessary(profileType);
        MembershipType membershipType = new MembershipType();
        membershipType.setProfileType(pt);
        membershipType.setName(nameSupplier.get());
        membershipType.setProgrammaticIdentifier(programmaticId);
        pt.getMembershipTypeSet().add(membershipType);
        pt = mergeProfileType(pt);
        return getMembershipType(pt, programmaticId).orElseThrow(
                () -> new IllegalStateException("Unable to find MembershipType even after it was persisted."));
    });
    mt.setDefaultOperations(defaultOperationsSupplier.get());
    return mergeMembershipType(mt);
}
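
The example above relies on Optional#orElseGet, which invokes its Supplier only when the Optional is empty, so nameSupplier.get() is never called for an existing MembershipType. A stripped-down sketch of that get-or-create pattern, with a hypothetical in-memory find method standing in for the DAO lookup:

import java.util.Optional;
import java.util.function.Supplier;

class GetOrCreateSketch {
    // Hypothetical lookup; the real code queries the persistence layer.
    static Optional<String> find(String id) {
        return Optional.empty();
    }

    static String getOrNew(String id, Supplier<String> nameSupplier) {
        // orElseGet calls the supplier (and thus nameSupplier.get()) only on an empty Optional.
        return find(id).orElseGet(() -> "created:" + nameSupplier.get());
    }

    public static void main(String[] args) {
        System.out.println(getOrNew("gold", () -> "Gold Membership"));
    }
}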

From source file:com.evolveum.midpoint.common.refinery.RefinedObjectClassDefinitionImpl.java

private <X> RefinedAttributeDefinition<X> substituteRefinedAttributeDefinition(
        Supplier<RefinedAttributeDefinition<X>> getter, Consumer<RefinedAttributeDefinition<X>> setter,
        Supplier<ResourceAttributeDefinition<X>> getterOfOriginal) {
    RefinedAttributeDefinition<X> value = getter.get();
    if (value == null) {
        ResourceAttributeDefinition<X> original = getterOfOriginal.get();
        if (original == null) {
            return null;
        }
        value = findAttributeDefinition(original.getName());
        setter.accept(value);
    }
    return value;
}
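
The getter/setter pair of Supplier and Consumer lets one helper lazily populate whichever cached field it is handed. The same idea in isolation, as a sketch with a made-up holder class:

import java.util.function.Consumer;
import java.util.function.Supplier;

class LazyFieldSketch {
    private String displayName; // cached field, computed on first access

    private String computeOrCache(Supplier<String> getter, Consumer<String> setter,
            Supplier<String> compute) {
        String value = getter.get();
        if (value == null) {
            value = compute.get();   // the expensive lookup runs only once
            setter.accept(value);    // cache the result through the provided setter
        }
        return value;
    }

    String getDisplayName() {
        return computeOrCache(() -> displayName, v -> displayName = v,
                () -> "computed-" + System.nanoTime());
    }

    public static void main(String[] args) {
        LazyFieldSketch sketch = new LazyFieldSketch();
        System.out.println(sketch.getDisplayName().equals(sketch.getDisplayName())); // true: cached
    }
}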

From source file:com.joyent.manta.http.EncryptionHttpHelper.java

/**
 * Builds a {@link Map} of decrypted metadata keys and values.
 *
 * @param encryptionType encryption type header value
 * @param metadataIvBase64 metadata ciphertext iv header value
 * @param metadataCiphertextBase64 metadata ciphertext header value
 * @param hmacId hmac identifier header value
 * @param metadataHmacBase64 metadata hmac header value
 * @param request http request object
 * @param response http response object
 * @return decrypted map of encrypted metadata
 */
@SuppressWarnings("ParameterNumber")
private Map<String, String> buildEncryptedMetadata(final String encryptionType, final String metadataIvBase64,
        final String metadataCiphertextBase64, final String hmacId, final String metadataHmacBase64,
        final HttpRequest request, final HttpResponse response) {
    try {
        EncryptionType.validateEncryptionTypeIsSupported(encryptionType);
    } catch (MantaClientEncryptionException e) {
        HttpHelper.annotateContextedException(e, request, response);
        throw e;
    }

    final byte[] metadataIv = Base64.getDecoder().decode(metadataIvBase64);
    final Cipher metadataCipher = buildMetadataDecryptCipher(metadataIv);

    if (metadataCiphertextBase64 == null) {
        String msg = "No encrypted metadata stored on object";
        MantaClientEncryptionException e = new MantaClientEncryptionException(msg);
        HttpHelper.annotateContextedException(e, request, response);
        throw e;
    }

    final byte[] metadataCipherText = Base64.getDecoder().decode(metadataCiphertextBase64);

    // Validate Hmac if we aren't using AEAD
    if (!cipherDetails.isAEADCipher()) {
        if (hmacId == null) {
            String msg = "No HMAC algorithm specified for metadata ciphertext authentication";
            MantaClientEncryptionException e = new MantaClientEncryptionException(msg);
            HttpHelper.annotateContextedException(e, request, response);
            throw e;
        }

        Supplier<HMac> hmacSupplier = SupportedHmacsLookupMap.INSTANCE.get(hmacId);
        if (hmacSupplier == null) {
            String msg = String.format("Unsupported HMAC specified: %s", hmacId);
            MantaClientEncryptionException e = new MantaClientEncryptionException(msg);
            HttpHelper.annotateContextedException(e, request, response);
            throw e;
        }

        final HMac hmac = hmacSupplier.get();
        initHmac(this.secretKey, hmac);
        hmac.update(metadataCipherText, 0, metadataCipherText.length);

        byte[] actualHmac = new byte[hmac.getMacSize()];
        hmac.doFinal(actualHmac, 0);

        if (metadataHmacBase64 == null) {
            String msg = "No metadata HMAC is available to authenticate metadata ciphertext";
            MantaClientEncryptionException e = new MantaClientEncryptionException(msg);
            HttpHelper.annotateContextedException(e, request, response);
            throw e;
        }

        byte[] expectedHmac = Base64.getDecoder().decode(metadataHmacBase64);

        if (!Arrays.equals(expectedHmac, actualHmac)) {
            String msg = "The expected HMAC value for metadata ciphertext didn't equal the actual value";
            MantaClientEncryptionException e = new MantaClientEncryptionException(msg);
            HttpHelper.annotateContextedException(e, request, null);
            e.setContextValue("expected", Hex.encodeHexString(expectedHmac));
            e.setContextValue("actual", Hex.encodeHexString(actualHmac));
            throw e;
        }
    }

    byte[] plaintext = decryptMetadata(metadataCipherText, metadataCipher);
    return EncryptedMetadataUtils.plaintextMetadataAsMap(plaintext);
}
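
SupportedHmacsLookupMap maps an algorithm name to a Supplier<HMac>, so a successful lookup followed by get() yields a fresh, unshared instance. The same registry idea with only standard-library types (java.util.zip checksums standing in for the Bouncy Castle HMac suppliers):

import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.function.Supplier;
import java.util.zip.Adler32;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

class FactoryMapSketch {
    // Constructor references are Suppliers: each get() builds a new instance.
    static final Map<String, Supplier<Checksum>> CHECKSUMS =
            Map.of("crc32", CRC32::new, "adler32", Adler32::new);

    public static void main(String[] args) {
        Supplier<Checksum> supplier = CHECKSUMS.get("crc32");
        if (supplier == null) {
            throw new IllegalArgumentException("Unsupported checksum specified");
        }
        Checksum checksum = supplier.get();
        byte[] data = "ciphertext".getBytes(StandardCharsets.UTF_8);
        checksum.update(data, 0, data.length);
        System.out.println(Long.toHexString(checksum.getValue()));
    }
}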

From source file:com.hortonworks.registries.schemaregistry.webservice.SchemaRegistryResource.java

/**
 * Checks whether the current instance is the leader. If so, it invokes the given {@code supplier}; otherwise the
 * current request is redirected to the leader node in the registry cluster.
 *
 * @param uriInfo  URI info of the current request, used to build the redirect location
 * @param supplier the action to invoke when this instance is the leader
 * @return the supplier's response when this instance is the leader, otherwise a temporary redirect to the leader
 */
private Response handleLeaderAction(UriInfo uriInfo, Supplier<Response> supplier) {
    LOG.info("URI info [{}]", uriInfo.getRequestUri());
    if (!leadershipParticipant.get().isLeader()) {
        URI location = null;
        try {
            String currentLeaderLoc = leadershipParticipant.get().getCurrentLeader();
            URI leaderServerUrl = new URI(currentLeaderLoc);
            URI requestUri = uriInfo.getRequestUri();
            location = new URI(leaderServerUrl.getScheme(), leaderServerUrl.getAuthority(),
                    requestUri.getPath(), requestUri.getQuery(), requestUri.getFragment());
            LOG.info("Redirecting to URI [{}] as this instance is not the leader", location);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return Response.temporaryRedirect(location).build();
    } else {
        LOG.info("Invoking here as this instance is the leader");
        return supplier.get();
    }
}
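
Passing a Supplier<Response> instead of an already-built Response means the work runs only on the leader; followers return a redirect without ever invoking the supplier. A simplified sketch of that deferral, using a plain String in place of a JAX-RS Response:

import java.util.function.Supplier;

class LeaderActionSketch {
    private final boolean leader;

    LeaderActionSketch(boolean leader) {
        this.leader = leader;
    }

    String handleLeaderAction(Supplier<String> action) {
        if (!leader) {
            // The supplier is never invoked here, so followers do no work.
            return "redirect-to-leader";
        }
        return action.get();
    }

    public static void main(String[] args) {
        Supplier<String> expensive = () -> "result computed on the leader";
        System.out.println(new LeaderActionSketch(false).handleLeaderAction(expensive));
        System.out.println(new LeaderActionSketch(true).handleLeaderAction(expensive));
    }
}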

From source file:com.ikanow.aleph2.data_import_manager.analytics.actors.TestAnalyticsTriggerWorkerActor.java

protected void waitForData(Supplier<Long> getCount, long exit_value, boolean ascending) {
    int ii = 0;
    long curr_val = -1;
    for (; ii < 10; ++ii) {
        curr_val = getCount.get();
        if (ascending && (curr_val >= exit_value))
            break;
        else if (!ascending && (curr_val <= exit_value))
            break;
        try {
            Thread.sleep(500L);
        } catch (Exception e) {
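            // ignore the interruption and keep polling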
        }
    }
    System.out.println("(Waited " + ii / 2 + " (secs) for count=" + curr_val + ")");
}
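
Because waitForData takes a Supplier<Long> rather than a long, the count is re-read on every pass through the loop. Any counter exposed as a method reference will do; for example, with an AtomicLong (a sketch, not part of the test above):

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

class PollingSketch {
    public static void main(String[] args) throws InterruptedException {
        AtomicLong counter = new AtomicLong();
        new Thread(() -> counter.set(5)).start();

        // counter::get is re-evaluated on each call to getCount.get().
        Supplier<Long> getCount = counter::get;
        while (getCount.get() < 5) {
            Thread.sleep(10L);
        }
        System.out.println("count=" + getCount.get());
    }
}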

From source file:org.hyperledger.jackson.SupernodeModule.java

public SupernodeModule(Supplier<Boolean> production) {
    super("Supernode");

    this.formatter = HyperLedgerSettings.getInstance().getTxWireFormatter();

    addDeserializer(MasterPrivateKey.class, new MasterPrivateKeyDeserializer());
    addDeserializer(MasterPublicKey.class, new MasterPublicKeyDeserializer());
    addDeserializer(Script.class, new ScriptDeserializer());
    addDeserializer(UIAddress.class, new AddressDeserializer());
    addDeserializer(Transaction.class, new TransactionDeserializer(formatter));
    addDeserializer(Hash.class, new HashDeserializer());
    addDeserializer(TID.class, new TIDDeserializer());
    addDeserializer(BID.class, new BIDDeserializer());

    addSerializer(MasterPrivateKey.class, new MasterPrivateKeySerializer());
    addSerializer(MasterPublicKey.class, new MasterPublicKeySerializer());
    addSerializer(Script.class, new ScriptSerializer());
    addSerializer(UIAddress.class, new AddressSerializer());
    addSerializer(Transaction.class, new TransactionSerializer());
    addSerializer(Outpoint.class, new OutpointSerializer());
    addSerializer(Hash.class, new HashSerializer());
    addSerializer(TID.class, new TIDSerializer());
    addSerializer(BID.class, new BIDSerializer());

    this.setSerializerModifier(new BeanSerializerModifier() {
        @Override
        public JsonSerializer<?> modifySerializer(SerializationConfig config, BeanDescription beanDesc,
                JsonSerializer<?> serializer) {
            if (serializer instanceof MasterPublicKeySerializer) {
                MasterPublicKeySerializer.class.cast(serializer).setProduction(production.get());
            }
            if (serializer instanceof MasterPrivateKeySerializer) {
                MasterPrivateKeySerializer.class.cast(serializer).setProduction(production.get());
            }

            return serializer;
        }
    });
}
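
Accepting Supplier<Boolean> production rather than a boolean means the production flag is read when a serializer is actually configured, not once at module construction. A minimal sketch of that deferred-configuration idea, with hypothetical names and no Jackson dependency:

import java.util.function.Supplier;

class DeferredConfigSketch {
    private final Supplier<Boolean> production;

    DeferredConfigSketch(Supplier<Boolean> production) {
        this.production = production;
    }

    String describe() {
        // The flag is read at use time, so later configuration changes are picked up.
        return production.get() ? "mainnet encoding" : "testnet encoding";
    }

    public static void main(String[] args) {
        boolean[] flag = {false};
        DeferredConfigSketch sketch = new DeferredConfigSketch(() -> flag[0]);
        System.out.println(sketch.describe()); // testnet encoding
        flag[0] = true;
        System.out.println(sketch.describe()); // mainnet encoding
    }
}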

From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

@Override
public CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> storeObjects(final List<O> new_objects,
        final boolean replace_if_present) {
    try {
        final ReadWriteContext rw_context = getRwContextOrThrow(_state.es_context, "storeObjects");

        final BulkRequestBuilder brb = new_objects.stream()
                .reduce(_state.client.prepareBulk().setConsistencyLevel(WriteConsistencyLevel.ONE)
                        .setRefresh(CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy),
                        (acc, val) -> acc.add(singleObjectIndexRequest(Either.left(rw_context),
                                Either.left(val), replace_if_present, true)),
                        (acc1, acc2) -> {
                            throw new RuntimeException("Internal logic error - Parallel not supported");
                        });

        final BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>> action_handler = new BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>>() {
            // WARNING: mutable/imperative code ahead...
            long _curr_written = 0;
            List<Object> _id_list = null;
            HashMap<String, String> _mapping_failures = null;

            @Override
            public void accept(final BulkResponse result,
                    final CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> future) {

                if (result.hasFailures() && (rw_context
                        .typeContext() instanceof ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext)) {
                    final ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext auto_context = (ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext) rw_context
                            .typeContext();
                    // Recursive builder in case I need to build a second batch of docs                        
                    BulkRequestBuilder brb2 = null;

                    if (null == _id_list) {
                        _id_list = new LinkedList<Object>();
                    }
                    HashMap<String, String> temp_mapping_failures = null;
                    final Iterator<BulkItemResponse> it = result.iterator();
                    while (it.hasNext()) {
                        final BulkItemResponse bir = it.next();
                        if (bir.isFailed()) {
                            if (bir.getFailure().getMessage().startsWith("MapperParsingException")) {
                                final Set<String> fixed_type_fields = rw_context.typeContext()
                                        .fixed_type_fields();
                                if (!fixed_type_fields.isEmpty()) {
                                    // Obtain the field name from the exception (if we fail then drop the record) 
                                    final String field = getFieldFromParsingException(
                                            bir.getFailure().getMessage());
                                    if ((null == field) || fixed_type_fields.contains(field)) {
                                        continue;
                                    }
                                } //(else roll on to...)                                                

                                // OK this is the case where I might be able to apply auto types:
                                if (null == brb2) {
                                    brb2 = _state.client.prepareBulk()
                                            .setConsistencyLevel(WriteConsistencyLevel.ONE).setRefresh(
                                                    CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy);
                                }
                                String failed_json = null;
                                if (null == _mapping_failures) { // first time through, use item id to grab the objects from the original request
                                    if (null == temp_mapping_failures) {
                                        temp_mapping_failures = new HashMap<String, String>();
                                    }
                                    final ActionRequest<?> ar = brb.request().requests().get(bir.getItemId());
                                    if (ar instanceof IndexRequest) {
                                        IndexRequest ir = (IndexRequest) ar;
                                        failed_json = ir.source().toUtf8();
                                        temp_mapping_failures.put(bir.getId(), failed_json);
                                    }
                                } else { // have already grabbed all the failure _ids and stuck in a map
                                    failed_json = _mapping_failures.get(bir.getId());
                                }
                                if (null != failed_json) {
                                    brb2.add(singleObjectIndexRequest(
                                            Either.right(Tuples._2T(bir.getIndex(),
                                                    ElasticsearchContextUtils.getNextAutoType(
                                                            auto_context.getPrefix(), bir.getType()))),
                                            Either.right(Tuples._2T(bir.getId(), failed_json)), false, true));
                                }
                            }
                            // Ugh otherwise just silently fail I guess? 
                            //(should I also look for transient errors and resubmit them after a pause?!)
                        } else { // (this item worked)
                            _id_list.add(bir.getId());
                            _curr_written++;
                        }
                    }
                    if (null != brb2) { // found mapping errors to retry with
                        if (null == _mapping_failures) // (first level of recursion)
                            _mapping_failures = temp_mapping_failures;

                        // (note that if brb2.request().requests().isEmpty() this is an internal logic error, so it's OK to throw)
                        ElasticsearchFutureUtils.wrap(brb2.execute(), future, this, (error, future2) -> {
                            future2.completeExceptionally(error);
                        });
                    } else { // relative success, plus we've built the list anyway
                        future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                    }
                } else { // No errors with this iteration of the bulk request         
                    _curr_written += result.getItems().length;

                    if (null == _id_list) { // This is the first bulk request, no recursion on failures, so can lazily create the list in case it isn't needed
                        final Supplier<List<Object>> get_objects = () -> {
                            return StreamSupport.stream(result.spliterator(), false)
                                    .filter(bir -> !bir.isFailed()).map(bir -> bir.getId())
                                    .collect(Collectors.toList());
                        };
                        final Supplier<Long> get_count_workaround = () -> {
                            return StreamSupport.stream(result.spliterator(), false)
                                    .filter(bir -> !bir.isFailed()).collect(Collectors.counting());
                        };
                        get_count_workaround.get();
                        future.complete(Tuples._2T(get_objects, get_count_workaround));
                    } else { // have already calculated everything so just return it                     
                        future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                    }
                }
            }
        };

        return ElasticsearchFutureUtils.wrap(brb.execute(),
                new CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>(), action_handler,
                (error, future) -> {
                    future.completeExceptionally(error);
                });
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
}
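
Returning Tuple2<Supplier<List<Object>>, Supplier<Long>> instead of materialized values lets the caller skip building the id list unless it actually asks for it. The same deferred-result idea with only standard types (a sketch, not the Elasticsearch code; Map.Entry stands in for the Tuple2):

import java.util.AbstractMap.SimpleEntry;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

class LazyResultSketch {
    static CompletableFuture<Map.Entry<Supplier<List<String>>, Supplier<Long>>> store(int n) {
        return CompletableFuture.supplyAsync(() -> {
            // Neither supplier does any work until the caller invokes get().
            Supplier<List<String>> ids = () -> IntStream.range(0, n)
                    .mapToObj(i -> "id-" + i).collect(Collectors.toList());
            Supplier<Long> count = () -> (long) n;
            return new SimpleEntry<>(ids, count);
        });
    }

    public static void main(String[] args) throws Exception {
        Map.Entry<Supplier<List<String>>, Supplier<Long>> result = store(3).get();
        System.out.println(result.getValue().get()); // prints 3; the id list is never built
    }
}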

From source file:com.example.app.profile.model.ProfileDAO.java

/**
 * Get the MembershipType whose ProgrammaticIdentifier corresponds to the given ProgrammaticIdentifier.
 * If one does not exist, it is created and persisted.
 *
 * @param profileType the ProfileType that owns the MembershipType to search for
 * @param info the MembershipType info
 * @param defaultOperationsSupplier a supplier for the MembershipOperations for the MembershipType
 *
 * @return a matching MembershipType, or a newly persisted one.
 */
@Nonnull
public MembershipType getMembershipTypeOrNew(@Nonnull ProfileType profileType, MembershipTypeInfo info,
        @Nonnull Supplier<List<MembershipOperation>> defaultOperationsSupplier) {
    MembershipType mt = getMembershipType(profileType, info.getProgId()).orElseGet(() -> {
        ProfileType pt = _er.reattachIfNecessary(profileType);
        MembershipType membershipType = new MembershipType();
        membershipType.setProfileType(pt);
        membershipType.setName(info.getNewNameLocalizedObjectKey());
        membershipType.setProgrammaticIdentifier(info.getProgId());
        pt.getMembershipTypeSet().add(membershipType);
        pt = mergeProfileType(pt);
        return getMembershipType(pt, info.getProgId()).orElseThrow(
                () -> new IllegalStateException("Unable to find MembershipType even after it was persisted."));
    });
    mt.setDefaultOperations(defaultOperationsSupplier.get());
    return mergeMembershipType(mt);
}

From source file:io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java

private <T extends Throwable> FlushResult tryFlushSegment(SegmentAggregator aggregator,
        Supplier<T> exceptionProvider, Executor executor) {
    try {
        FlushResult flushResult = aggregator.flush(TIMEOUT, executor).get(TIMEOUT.toMillis(),
                TimeUnit.MILLISECONDS);
        T expectedException = exceptionProvider.get();
        Assert.assertNull("Expected an exception but none got thrown.", expectedException);
        Assert.assertNotNull("Expected a FlushResult.", flushResult);
        return flushResult;
    } catch (Throwable ex) {
        ex = ExceptionHelpers.getRealException(ex);
        T expectedException = exceptionProvider.get();
        Assert.assertEquals("Unexpected exception or no exception got thrown.", expectedException, ex);
        return null;
    }
}
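
Here the Supplier<T> decides at assertion time whether an exception was expected, with null meaning none. A compact sketch of that idea without the Pravega test harness, using plain assertions instead of JUnit:

import java.util.function.Supplier;

class ExpectedExceptionSketch {
    static String tryRun(Runnable action, Supplier<? extends Throwable> expected) {
        try {
            action.run();
            if (expected.get() != null) {
                throw new AssertionError("Expected an exception but none got thrown.");
            }
            return "ok";
        } catch (RuntimeException ex) {
            Throwable want = expected.get();
            if (want == null || !want.getClass().isInstance(ex)) {
                throw new AssertionError("Unexpected exception: " + ex, ex);
            }
            return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(tryRun(() -> { }, () -> null));
        System.out.println(tryRun(() -> { throw new IllegalStateException(); },
                IllegalStateException::new));
    }
}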