Example usage for java.util.Optional.orElse

List of usage examples for java.util.Optional.orElse

Introduction

On this page you can find example usages of java.util.Optional.orElse, collected from open-source projects.

Prototype

public T orElse(T other) 

Document

If a value is present, returns the value; otherwise returns other.
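
As a quick, self-contained sketch (not taken from any of the projects below): orElse unwraps the value when present and substitutes the supplied default otherwise. Note that the argument to orElse is always evaluated, even when a value is present; prefer orElseGet when computing the default is expensive.

import java.util.Optional;

public class OrElseDemo {
    public static void main(String[] args) {
        Optional<String> present = Optional.of("value");
        Optional<String> empty = Optional.empty();

        System.out.println(present.orElse("default")); // prints "value"
        System.out.println(empty.orElse("default"));   // prints "default"

        // The orElse argument is evaluated eagerly; orElseGet defers it
        // to a supplier that only runs when the Optional is empty.
        System.out.println(empty.orElseGet(() -> "computed lazily"));
    }
}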

Usage

From source file: org.onosproject.store.primitives.impl.EventuallyConsistentMapImpl.java

private MapValue<V> removeInternal(K key, Optional<V> value, Optional<MapValue<V>> tombstone) {
    checkState(!destroyed, destroyedMessage);
    checkNotNull(key, ERROR_NULL_KEY);
    checkNotNull(value, ERROR_NULL_VALUE);
    tombstone.ifPresent(v -> checkState(v.isTombstone()));

    counter.incrementCount();
    AtomicBoolean updated = new AtomicBoolean(false);
    AtomicReference<MapValue<V>> previousValue = new AtomicReference<>();
    items.compute(key, (k, existing) -> {
        boolean valueMatches = true;
        if (value.isPresent() && existing != null && existing.isAlive()) {
            valueMatches = Objects.equals(value.get(), existing.get());
        }
        if (existing == null) {
            log.trace("ECMap Remove: Existing value for key {} is already null", k);
        }
        if (valueMatches) {
            if (existing == null) {
                updated.set(tombstone.isPresent());
            } else {
                updated.set(!tombstone.isPresent() || tombstone.get().isNewerThan(existing));
            }
        }
        if (updated.get()) {
            previousValue.set(existing);
            return tombstone.orElse(null);
        } else {
            return existing;
        }
    });
    return previousValue.get();
}
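
In this excerpt, tombstone.orElse(null) converts the Optional back into a nullable reference: Map.compute treats a null return as a removal, so an empty tombstone deletes the entry. A minimal sketch of this Optional-to-nullable bridge, with hypothetical names:

import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;

public class OptionalToNullable {
    public static void main(String[] args) {
        Map<String, String> items = new ConcurrentHashMap<>();
        items.put("key", "old");

        Optional<String> tombstone = Optional.empty();

        // compute() treats a null return as "remove the mapping",
        // so an empty Optional deletes the entry here.
        items.compute("key", (k, existing) -> tombstone.orElse(null));

        System.out.println(items.containsKey("key")); // prints "false"
    }
}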

From source file: com.ikanow.aleph2.search_service.elasticsearch.services.TestElasticsearchIndexService.java

public void addRecordToSecondaryBuffer(final DataBucketBean bucket, Optional<String> buffer_name) {
    final ICrudService<JsonNode> buffer_crud = _index_service.getDataService().get()
            .getWritableDataService(JsonNode.class, bucket, Optional.empty(), buffer_name).get()
            .getCrudService().get();

    buffer_crud.storeObject(_mapper.createObjectNode().put("name", buffer_name.orElse("current")));
}

From source file: org.ow2.proactive.connector.iaas.cloud.provider.azure.scaleset.AzureScaleSetProvider.java

@Override
public Set<Instance> createInstance(Infrastructure infrastructure, Instance instance) {

    // Initialize Azure service connector
    Azure azureService = azureServiceCache.getService(infrastructure);

    // Retrieve options and prepare Azure resources creation
    Optional<Options> options = Optional.ofNullable(instance.getOptions());
    genAzureResourcesNames(infrastructure);

    logger.info("Starting creation of Azure Scale set '" + azureScaleSetName + "'.");

    // Retrieve Linux image to be used (but have to comply with the Azure VMSS policy and supported images)
    // see selectLinuxImage() below.
    String imageNameOrId = Optional.ofNullable(instance.getImage()).orElseThrow(
            () -> new RuntimeException("ERROR missing Image name/id from instance: '" + instance + "'"));

    // Retrieve resource group
    String rgName = options.map(Options::getResourceGroup).orElseThrow(() -> new RuntimeException(
            "ERROR unable to find a suitable resourceGroup from instance: '" + instance + "'"));
    ResourceGroup resourceGroup = azureProviderUtils.searchResourceGroupByName(azureService, rgName)
            .orElseThrow(() -> new RuntimeException(
                    "ERROR unable to find a suitable resourceGroup from instance: '" + instance + "'"));

    // Try to get region from provided name, otherwise get it from image
    Region region = options.map(presentOptions -> Region.findByLabelOrName(presentOptions.getRegion()))
            .orElseGet(resourceGroup::region);

    // Get existing virtual private network if specified
    Optional<Network> optionalVirtualNetwork = options.map(Options::getSubnetId)
            .map(subnetId -> azureProviderUtils.searchVirtualNetworkByName(azureService, subnetId).get());

    // Get VM admin user credentials
    String vmAdminUsername = Optional.ofNullable(instance.getCredentials())
            .map(InstanceCredentials::getUsername).orElse(defaultUsername);
    String vmAdminPassword = Optional.ofNullable(instance.getCredentials())
            .map(InstanceCredentials::getPassword).orElse(defaultPassword);
    Optional<String> vmAdminSSHPubKey = Optional.ofNullable(instance.getCredentials())
            .map(InstanceCredentials::getPublicKey);

    // Retrieve number of instances within the Scale Set
    int vmssNbOfInstances = Integer
            .valueOf(Optional.ofNullable(instance.getNumber()).orElse(SINGLE_INSTANCE_NUMBER));

    // Retrieve the customScript URL provided by the node source or throw an Exception otherwise.
    String customScriptUrl = Optional.ofNullable(instance.getCustomScriptUrl())
            .orElseThrow(() -> new RuntimeException("ERROR missing customScript URL."));
    final String scriptName = customScriptUrl.substring(customScriptUrl.lastIndexOf('/') + 1,
            customScriptUrl.length());
    final String installCommand = "bash " + scriptName;
    List<String> fileUris = new ArrayList<>();
    fileUris.add(customScriptUrl);

    // Retrieve the provided VNET or create a new one
    Network network = optionalVirtualNetwork.orElse(azureService.networks().define(azureVNetName)
            .withRegion(region).withExistingResourceGroup(resourceGroup).withAddressSpace(vmssNetAddressSpace)
            .defineSubnet(azureSubnetName).withAddressPrefix(vmssNetAddressPrefix).attach().create());

    // Retrieve the provided public IP address or create a new one
    PublicIpAddress publicIPAddress = options.map(Options::getPublicIpAddress)
            .map(publicIpAddresses -> azureProviderUtils
                    .searchPublicIpAddressByIp(azureService, publicIpAddresses).get())
            .orElse(azureService.publicIpAddresses().define(azureIPName).withRegion(region)
                    .withExistingResourceGroup(resourceGroup).withLeafDomainLabel(azureIPName).create());

    // Create a dedicated LB with the required rules
    LoadBalancer lb = createLoadBalancer(azureService, region, resourceGroup, publicIPAddress);

    // Create the Scale Set (multi-stages)
    VirtualMachineScaleSet virtualMachineScaleSet = createVMSS(azureService, region, resourceGroup, instance,
            network, lb, imageNameOrId, vmAdminUsername, vmAdminSSHPubKey, vmAdminPassword, vmssNbOfInstances,
            fileUris, installCommand);

    logger.info(
            "Azure Scale set '" + azureScaleSetName + "'" + " created inside resource group " + resourceGroup);

    // Return the list of VMs of the Scale Set
    return virtualMachineScaleSet.virtualMachines().list().stream()
            .map(vm -> instance.withTag(vm.name()).withId(vm.id()).withNumber(SINGLE_INSTANCE_NUMBER))
            .collect(Collectors.toSet());
}
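
One subtlety worth noting in the excerpt above: orElse evaluates its argument eagerly, so the fallback network and public IP builder chains (including their final create() calls) run even when the Optional already holds a value. When the default is expensive or has side effects, orElseGet with a supplier defers that work. A minimal sketch of the difference, using hypothetical names:

import java.util.Optional;

public class EagerVsLazyDefault {
    static String expensiveCreate() {
        System.out.println("expensive default computed");
        return "new-resource";
    }

    public static void main(String[] args) {
        Optional<String> existing = Optional.of("existing-resource");

        // orElse: expensiveCreate() runs even though a value is present.
        String a = existing.orElse(expensiveCreate());

        // orElseGet: the supplier is invoked only when the Optional is empty,
        // so nothing extra is printed here.
        String b = existing.orElseGet(EagerVsLazyDefault::expensiveCreate);

        System.out.println(a + " / " + b);
    }
}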

From source file: org.trellisldp.http.impl.GetHandler.java

/**
 * Build the representation for the given resource
 * @param res the resource
 * @return the response builder
 */
public ResponseBuilder getRepresentation(final Resource res) {
    final String identifier = getBaseUrl() + req.getPartition() + req.getPath();

    // Check if this is already deleted
    checkDeleted(res, identifier);

    LOGGER.debug("Acceptable media types: {}", req.getHeaders().getAcceptableMediaTypes());
    final Optional<RDFSyntax> syntax = getSyntax(req.getHeaders().getAcceptableMediaTypes(),
            res.getBinary().map(b -> b.getMimeType().orElse(APPLICATION_OCTET_STREAM)));

    if (ACL.equals(req.getExt()) && !res.hasAcl()) {
        throw new NotFoundException();
    }

    final ResponseBuilder builder = basicGetResponseBuilder(res, syntax);

    // Add NonRDFSource-related "describe*" link headers
    res.getBinary().ifPresent(ds -> {
        if (syntax.isPresent()) {
            builder.link(identifier + "#description", "canonical").link(identifier, "describes");
        } else {
            builder.link(identifier, "canonical").link(identifier + "#description", "describedby")
                    .type(ds.getMimeType().orElse(APPLICATION_OCTET_STREAM));
        }
    });

    // Only show memento links for the user-managed graph (not ACL)
    if (!ACL.equals(req.getExt())) {
        builder.link(identifier, "original timegate")
                .links(MementoResource.getMementoLinks(identifier, res.getMementos()).toArray(Link[]::new));
    }

    // URI Template
    builder.header(LINK_TEMPLATE,
            "<" + identifier + "{?version}>; rel=\"" + Memento.Memento.getIRIString() + "\"");

    // NonRDFSources responses (strong ETags, etc)
    if (res.getBinary().isPresent() && !syntax.isPresent()) {
        return getLdpNr(identifier, res, builder);
    }

    // RDFSource responses (weak ETags, etc)
    final RDFSyntax s = syntax.orElse(TURTLE);
    final IRI profile = getProfile(req.getHeaders().getAcceptableMediaTypes(), s);
    return getLdpRs(identifier, res, builder, s, profile);
}
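
Here syntax.orElse(TURTLE) falls back to a fixed constant when content negotiation yielded no RDF syntax. A constant is the safest kind of orElse argument, since evaluating it eagerly costs nothing. A minimal sketch with a hypothetical enum:

import java.util.Optional;

public class ConstantDefault {
    enum Syntax { TURTLE, JSONLD, NTRIPLES }

    public static void main(String[] args) {
        Optional<Syntax> negotiated = Optional.empty();

        // Falling back to a constant: eager evaluation is harmless here.
        Syntax chosen = negotiated.orElse(Syntax.TURTLE);
        System.out.println(chosen); // prints "TURTLE"
    }
}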

From source file: alfio.manager.TicketReservationManager.java

/**
 * Get the total cost with VAT if it's not included in the ticket price.
 *
 * @param reservationId
 * @return
 */
public TotalPrice totalReservationCostWithVAT(String reservationId) {
    TicketReservation reservation = ticketReservationRepository.findReservationById(reservationId);

    Optional<PromoCodeDiscount> promoCodeDiscount = Optional.ofNullable(reservation.getPromoCodeDiscountId())
            .map(promoCodeDiscountRepository::findById);

    Event event = eventRepository.findByReservationId(reservationId);
    List<Ticket> tickets = ticketRepository.findTicketsInReservation(reservationId);

    return totalReservationCostWithVAT(promoCodeDiscount.orElse(null), event, reservation.getVatStatus(),
            tickets, collectAdditionalServiceItems(reservationId, event));
}

From source file: com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_LibraryJars.java

/** Builds a V2 library bean out of a V1 share
 * @param src_json
 * @return
 * @throws JsonParseException
 * @throws JsonMappingException
 * @throws IOException
 * @throws ParseException
 */
protected static SharedLibraryBean getLibraryBeanFromV1Share(final JsonNode src_json)
        throws JsonParseException, JsonMappingException, IOException, ParseException {

    final String[] description_lines = Optional.ofNullable(safeJsonGet("description", src_json).asText())
            .orElse("unknown").split("\r\n?|\n");

    final String _id = "v1_" + safeJsonGet(JsonUtils._ID, src_json).asText();
    final String created = safeJsonGet("created", src_json).asText();
    final String modified = safeJsonGet("modified", src_json).asText();
    final String display_name = safeJsonGet("title", src_json).asText();
    final String path_name = display_name;

    final List<String> description_lines_list = Arrays.asList(description_lines);

    // Find possible JSON config
    Optional<Tuple2<Integer, Integer>> json_config = IntStream.range(1, description_lines.length).boxed()
            .filter(i -> description_lines[i].trim().startsWith("{")).findFirst()
            .<Tuple2<Integer, Integer>>map(start -> {
                return IntStream.range(start + 1, description_lines.length).boxed()
                        .filter(i -> !description_lines[i].matches("^\\s*[{}\"'].*")).findFirst()
                        .<Tuple2<Integer, Integer>>map(end -> Tuples._2T(start, end))
                        .orElse(Tuples._2T(start, description_lines.length));
            });

    @SuppressWarnings("unchecked")
    final Optional<Map<String, Object>> json = json_config
            .map(t2 -> description_lines_list.stream().limit(t2._2()).skip(t2._1())
                    .collect(Collectors.joining("\n")))
            .map(Lambdas.wrap_u(s -> _mapper.readTree(s)))
            .<Map<String, Object>>map(j -> (Map<String, Object>) _mapper.convertValue(j, Map.class));

    final Set<String> tags = safeTruncate(description_lines[description_lines.length - 1], 5).toLowerCase()
            .startsWith("tags:")
                    ? new HashSet<String>(Arrays.asList(description_lines[description_lines.length - 1]
                            .replaceFirst("(?i)tags:\\s*", "").split("\\s*,\\s*")))
                    : Collections.emptySet();

    final String description = description_lines_list.stream()
            .limit(Optional.of(description_lines.length).map(n -> tags.isEmpty() ? n : n - 1) // skip over the tags if any
                    .get())
            .skip(json_config.map(Tuple2::_2).orElse(1)).collect(Collectors.joining("\n"));

    final LibraryType type = LibraryType.misc_archive;
    final String owner_id = safeJsonGet(JsonUtils._ID, safeJsonGet("owner", src_json)).asText();
    //final JsonNode comm_objs = safeJsonGet("communities", src_json); // collection of { _id: $oid } types
    final String misc_entry_point = description_lines[0];

    final SharedLibraryBean bean = BeanTemplateUtils.build(SharedLibraryBean.class)
            .with(SharedLibraryBean::_id, _id).with(SharedLibraryBean::created, parseJavaDate(created))
            .with(SharedLibraryBean::modified, parseJavaDate(modified))
            .with(SharedLibraryBean::display_name, display_name).with(SharedLibraryBean::path_name, path_name)
            .with(SharedLibraryBean::description, description).with(SharedLibraryBean::tags, tags)
            .with(SharedLibraryBean::type, type).with(SharedLibraryBean::misc_entry_point, misc_entry_point)
            .with(SharedLibraryBean::owner_id, owner_id)
            .with(SharedLibraryBean::library_config, json.orElse(null)).done().get();

    return bean;
}
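
The first statement shows a common guard: wrap a possibly-null value with Optional.ofNullable and substitute a default via orElse before processing it further. A minimal sketch with hypothetical values:

import java.util.Optional;

public class NullableDefault {
    public static void main(String[] args) {
        String rawDescription = null; // e.g. a missing JSON field

        // Guard a possibly-null input with a default before processing it.
        String[] lines = Optional.ofNullable(rawDescription)
                .orElse("unknown")
                .split("\r\n?|\n");

        System.out.println(lines[0]); // prints "unknown"
    }
}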

From source file: org.ow2.proactive.connector.iaas.cloud.provider.vmware.VMWareProvider.java

private VirtualMachineRelocateSpec inferRelocateSpecsFromImageArgument(String image, Folder rootFolder) {

    Optional<ResourcePool> destinationPool = Optional.empty();
    Optional<HostSystem> destinationHost = Optional.empty();
    Optional<Datastore> destinationDatastore = Optional.empty();

    if (isMultiPartImage(image)) {
        String hostname = image.split(IMAGE_DELIMITER)[1];
        if (hostname.equals(RANDOM_HOST)) {
            destinationPool = vmWareProviderVirtualMachineUtil.getRandomResourcePool(rootFolder);
            if (destinationPool.isPresent()) {
                destinationDatastore = vmWareProviderVirtualMachineUtil
                        .getDatastoreWithMostSpaceFromPool(destinationPool.get());
            }
        } else {
            destinationPool = vmWareProviderVirtualMachineUtil.searchResourcePoolByHostname(hostname,
                    rootFolder);
            destinationHost = vmWareProviderVirtualMachineUtil.searchHostByName(hostname, rootFolder);
            if (destinationHost.isPresent()) {
                destinationDatastore = vmWareProviderVirtualMachineUtil
                        .getDatastoreWithMostSpaceFromHost(destinationHost.get());
            }
        }
    }

    VirtualMachine vmToClone = getVirtualMachineByNameOrUUID(getInstanceIdFromImage(image), rootFolder);

    return generateCustomRelocateSpecs(destinationPool.orElseGet(() -> {
        try {
            return vmToClone.getResourcePool();
        } catch (RemoteException e) {
            throw new RuntimeException("Unable to retrieve destination resource pool for VM:");
        }
    }), destinationHost.orElse(null), destinationDatastore.orElse(null));
}
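
The closing return illustrates two patterns side by side: orElseGet defers a fallback that performs a remote call (rethrowing its checked RemoteException as an unchecked exception), while the orElse(null) calls unwrap the remaining Optionals for a null-tolerant callee. A minimal sketch of the deferred-fallback half, with hypothetical names:

import java.rmi.RemoteException;
import java.util.Optional;

public class LazyFallbackDemo {
    static String fetchRemoteDefault() throws RemoteException {
        return "remote-default";
    }

    public static void main(String[] args) {
        Optional<String> maybe = Optional.empty();

        // The supplier runs only when the Optional is empty, wrapping
        // the checked RemoteException in an unchecked exception.
        String value = maybe.orElseGet(() -> {
            try {
                return fetchRemoteDefault();
            } catch (RemoteException e) {
                throw new RuntimeException(e);
            }
        });

        System.out.println(value); // prints "remote-default"
    }
}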

From source file: io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java

private MapValue removeInternal(String key, Optional<byte[]> value, Optional<MapValue> tombstone) {
    checkState(!closed, destroyedMessage);
    checkNotNull(key, ERROR_NULL_KEY);
    checkNotNull(value, ERROR_NULL_VALUE);
    tombstone.ifPresent(v -> checkState(v.isTombstone()));

    counter.incrementCount();
    AtomicBoolean updated = new AtomicBoolean(false);
    AtomicReference<MapValue> previousValue = new AtomicReference<>();
    items.compute(key, (k, existing) -> {
        boolean valueMatches = true;
        if (value.isPresent() && existing != null && existing.isAlive()) {
            valueMatches = Arrays.equals(value.get(), existing.get());
        }
        if (existing == null) {
            log.trace("ECMap Remove: Existing value for key {} is already null", k);
        }
        if (valueMatches) {
            if (existing == null) {
                updated.set(tombstone.isPresent());
            } else {
                updated.set(!tombstone.isPresent() || tombstone.get().isNewerThan(existing));
            }
        }
        if (updated.get()) {
            previousValue.set(existing);
            return tombstone.orElse(null);
        } else {
            return existing;
        }
    });
    return previousValue.get();
}

From source file: com.ikanow.aleph2.data_import.services.HarvestContext.java

@Override
public String getHarvestContextSignature(final Optional<DataBucketBean> bucket,
        final Optional<Set<Tuple2<Class<? extends IUnderlyingService>, Optional<String>>>> services) {
    if (_state_name == State.IN_TECHNOLOGY) {
        // Returns a config object containing:
        // - set up for any of the services described
        // - all the rest of the configuration
        // - the bucket bean ID

        final Config full_config = ModuleUtils.getStaticConfig()
                .withoutPath(DistributedServicesPropertyBean.APPLICATION_NAME)
                .withoutPath("MongoDbManagementDbService.v1_enabled") // (special workaround for V1 sync service)
        ;

        final Optional<Config> service_config = PropertiesUtils.getSubConfig(full_config, "service");

        final Optional<DataBucketBean> maybe_bucket = bucket.map(Optional::of)
                .orElseGet(() -> _mutable_state.bucket.optional());

        final ImmutableSet<Tuple2<Class<? extends IUnderlyingService>, Optional<String>>> complete_services_set = Optional
                .of(ImmutableSet.<Tuple2<Class<? extends IUnderlyingService>, Optional<String>>>builder()
                        .addAll(services.orElse(Collections.emptySet()))
                        .add(Tuples._2T(ICoreDistributedServices.class, Optional.empty()))
                        .add(Tuples._2T(IManagementDbService.class, Optional.empty()))
                        .add(Tuples._2T(IStorageService.class, Optional.empty()))
                        .add(Tuples._2T(ISecurityService.class, Optional.empty()))
                        .add(Tuples._2T(ILoggingService.class, Optional.empty())) //doesn't pull in ES via getUnderlyingArtefacts, relies on the one here
                        .add(Tuples._2T(IManagementDbService.class, IManagementDbService.CORE_MANAGEMENT_DB)))
                // Optional services:
                //TODO (ALEPH-19): 1) should port this across to the more comprehensive/centralized CSL, 2) Do I need a "support direct output" flag, and not do this if not set?
                // seems like a waste to stick these JARs on the classpath when the Harvester is normally only writing to real-time/file-based queue?
                // (see AnalyticsContext/DataServiceUtils for more details on point #1)
                .map(sb -> (maybe_bucket.map(b -> hasSearchIndexOutput(b)).orElse(false))
                        ? sb.add(Tuples._2T(ISearchIndexService.class, Optional.empty()))
                                .add(Tuples._2T(ITemporalService.class, Optional.empty())).add(
                                        Tuples._2T(IColumnarService.class, Optional.empty()))
                        : sb)
                .map(sb -> (maybe_bucket.map(b -> hasDocumentOutput(b)).orElse(false))
                        ? sb.add(Tuples._2T(IDocumentService.class, Optional.empty()))
                        : sb)
                .map(sb -> sb.build()).get();

        final Config config_no_services = full_config.withoutPath("service");

        if (_mutable_state.service_manifest_override.isSet()) {
            if (!complete_services_set.equals(_mutable_state.service_manifest_override.get())) {
                throw new RuntimeException(ErrorUtils.SERVICE_RESTRICTIONS);
            }
        } else {
            _mutable_state.service_manifest_override.set(complete_services_set);
        }

        // Ugh need to add: core deps, core + underlying management db to this list

        final Config service_defn_subset = complete_services_set.stream() // DON'T MAKE PARALLEL SEE BELOW
                .map(clazz_name -> {
                    final String config_path = clazz_name._2()
                            .orElse(clazz_name._1().getSimpleName().substring(1));
                    return Lambdas
                            .wrap_u(__ -> service_config.get().hasPath(config_path)
                                    ? Tuples._2T(config_path, service_config.get().getConfig(config_path))
                                    : null)
                            //(could add extra transforms here if we wanted)
                            .apply(Unit.unit());
                }).filter(cfg -> null != cfg).reduce(ConfigFactory.empty(),
                        (acc, k_v) -> acc.withValue(k_v._1(), k_v._2().root()), (acc1, acc2) -> acc1 // (This will never be called as long as the above stream is not parallel)
        );

        // Service configuration:
        final Config service_cfgn_subset = _mutable_state.service_manifest_override.get().stream() // DON'T MAKE PARALLEL SEE BELOW
                .reduce(config_no_services, // (leave other configurations, we just transform service specific configuration)
                        (acc, clazz_name) -> {
                            final Optional<? extends IUnderlyingService> underlying_service = _service_context
                                    .getService(clazz_name._1(), clazz_name._2());
                            return underlying_service.map(ds -> ds.createRemoteConfig(bucket, acc)).orElse(acc);
                        }, (acc1, acc2) -> acc1 // (This will never be called as long as the above stream is not parallel)
        );

        final Config config_subset_services = service_cfgn_subset.withValue("service",
                service_defn_subset.root());

        final Config last_call = Lambdas
                .get(() -> _mutable_state.library_configs.isSet()
                        ? config_subset_services
                                .withValue(__MY_MODULE_LIBRARY_ID,
                                        ConfigValueFactory
                                                .fromAnyRef(BeanTemplateUtils
                                                        .toJson(new LibraryContainerBean(
                                                                _mutable_state.library_configs.get().entrySet()
                                                                        .stream()
                                                                        .filter(kv -> kv.getValue().path_name()
                                                                                .equals(kv.getKey()))
                                                                        .map(kv -> kv.getValue())
                                                                        .collect(Collectors.toList())))
                                                        .toString()))
                        : config_subset_services)
                .withValue(__MY_BUCKET_ID,
                        ConfigValueFactory.fromAnyRef(
                                maybe_bucket.map(b -> BeanTemplateUtils.toJson(b).toString()).orElse("{}")))
                .withValue(__MY_TECH_LIBRARY_ID, ConfigValueFactory.fromAnyRef(_mutable_state.technology_config
                        .optional().map(l -> BeanTemplateUtils.toJson(l).toString()).orElse("{}")));

        return this.getClass().getName() + ":" + last_call.root().render(ConfigRenderOptions.concise());
    } else {
        throw new RuntimeException(ErrorUtils.TECHNOLOGY_NOT_MODULE);
    }
}
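
Among the many orElse calls above, services.orElse(Collections.emptySet()) is a recurring idiom: substituting an immutable empty collection so downstream code can iterate without null checks. A minimal sketch with hypothetical names:

import java.util.Collections;
import java.util.Optional;
import java.util.Set;

public class EmptyCollectionDefault {
    public static void main(String[] args) {
        Optional<Set<String>> services = Optional.empty();

        // An immutable empty set lets callers iterate without null checks.
        for (String service : services.orElse(Collections.emptySet())) {
            System.out.println(service);
        }
        System.out.println("done"); // the loop body never ran
    }
}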

From source file: com.streamsets.pipeline.stage.origin.http.HttpClientSource.java

/** {@inheritDoc} */
@Override
public String produce(String lastSourceOffset, int maxBatchSize, BatchMaker batchMaker) throws StageException {
    long start = System.currentTimeMillis();
    int chunksToFetch = Math.min(conf.basic.maxBatchSize, maxBatchSize);
    Optional<String> newSourceOffset = Optional.empty();
    recordCount = 0;

    setPageOffset(lastSourceOffset);

    setResolvedUrl(resolveInitialUrl(lastSourceOffset));
    WebTarget target = client.target(getResolvedUrl());

    // If the request (headers or body) contain a known sensitive EL and we're not using https then fail the request.
    if (requestContainsSensitiveInfo() && !target.getUri().getScheme().toLowerCase().startsWith("https")) {
        LOG.error(Errors.HTTP_07.getMessage());
        throw new StageException(Errors.HTTP_07);
    }

    boolean uninterrupted = true;

    while (!waitTimeExpired(start) && uninterrupted && (recordCount < chunksToFetch)) {
        if (parser != null) {
            // We already have an response that we haven't finished reading.
            newSourceOffset = Optional.of(parseResponse(start, chunksToFetch, batchMaker));
        } else if (shouldMakeRequest()) {

            if (conf.pagination.mode != PaginationMode.NONE) {
                target = client.target(resolveNextPageUrl(newSourceOffset.orElse(null)));
                // Pause between paging requests so we don't get rate limited.
                uninterrupted = ThreadUtil.sleep(conf.pagination.rateLimit);
            }

            makeRequest(target);
            if (lastRequestTimedOut) {
                String actionName = conf.responseTimeoutActionConfig.getAction().name();
                LOG.warn(
                        "HTTPClient timed out after waiting {} ms for response from server;"
                                + " reconnecting client and proceeding as per configured {} action",
                        conf.client.readTimeoutMillis, actionName);
                reconnectClient();
                return nonTerminating(lastSourceOffset);
            } else {
                newSourceOffset = processResponse(start, chunksToFetch, batchMaker);
            }
        } else if (conf.httpMode == HttpClientMode.BATCH) {
            // We are done.
            return null;
        } else {
            // In polling mode, waiting for the next polling interval.
            uninterrupted = ThreadUtil.sleep(SLEEP_TIME_WAITING_FOR_BATCH_SIZE_MS);
        }
    }

    return newSourceOffset.orElse(lastSourceOffset);
}