Example usage for java.util Optional orElseGet

List of usage examples for java.util Optional orElseGet

Introduction

On this page you can find usage examples for java.util Optional orElseGet.

Prototype

public T orElseGet(Supplier<? extends T> supplier) 

Document

If a value is present, returns the value, otherwise returns the result produced by the supplying function.
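
The important difference from orElse is laziness: orElseGet evaluates its supplier only when the Optional is empty, whereas the argument to orElse is always evaluated before the call. A minimal sketch of that distinction (class and values are illustrative only):

import java.util.Optional;

public class OrElseGetDemo {

    public static void main(String[] args) {
        Optional<String> present = Optional.of("configured");
        Optional<String> empty = Optional.empty();

        // The supplier is NOT invoked here, because a value is present.
        System.out.println(present.orElseGet(OrElseGetDemo::expensiveDefault)); // configured

        // The supplier IS invoked here, because the Optional is empty.
        System.out.println(empty.orElseGet(OrElseGetDemo::expensiveDefault)); // computing default... then default
    }

    private static String expensiveDefault() {
        System.out.println("computing default...");
        return "default";
    }
}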

Usage

From source file:com.jvms.i18neditor.editor.Editor.java

public void importProject(Path dir, boolean showEmptyProjectError) {
    try {
        Preconditions.checkArgument(Files.isDirectory(dir));

        if (!closeCurrentProject()) {
            return;
        }

        clearUI();
        project = new EditorProject(dir);
        restoreProjectState(project);

        Optional<ResourceType> type = Optional.ofNullable(project.getResourceType());
        List<Resource> resourceList = Resources.get(dir, project.getResourceName(), type);
        Map<String, String> keys = Maps.newTreeMap();

        if (resourceList.isEmpty()) {
            project = null;
            if (showEmptyProjectError) {
                executor.execute(() -> showError(MessageBundle.get("resources.import.empty", dir)));
            }
        } else {
            project.setResourceType(type.orElseGet(() -> {
                ResourceType t = resourceList.get(0).getType();
                resourceList.removeIf(r -> r.getType() != t);
                return t;
            }));
            resourceList.forEach(resource -> {
                try {
                    Resources.load(resource);
                    setupResource(resource);
                    project.addResource(resource);
                } catch (IOException e) {
                    log.error("Error importing resource file " + resource.getPath(), e);
                    showError(
                            MessageBundle.get("resources.import.error.single", resource.getPath().toString()));
                }
            });
            project.getResources().forEach(r -> keys.putAll(r.getTranslations()));
        }
        translationTree.setModel(new TranslationTreeModel(Lists.newArrayList(keys.keySet())));

        updateTreeNodeStatuses();
        updateHistory();
        updateUI();
        requestFocusInFirstResourceField();
    } catch (IOException e) {
        log.error("Error importing resource files", e);
        showError(MessageBundle.get("resources.import.error.multiple"));
    }
}
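
Note the side effect inside the supplier above: the resource list is narrowed to a single type only when the project did not store a resource type, because orElseGet skips the supplier entirely whenever a value is present. A stripped-down sketch of the same idea (the type names and list contents are hypothetical):

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

public class ResolveTypeDemo {

    // Mirrors the pattern above: the supplier both picks a default
    // and filters the list, but only runs when 'configured' is empty.
    static String resolveType(Optional<String> configured, List<String> types) {
        return configured.orElseGet(() -> {
            String first = types.get(0);
            types.removeIf(t -> !t.equals(first));
            return first;
        });
    }

    public static void main(String[] args) {
        List<String> types = new ArrayList<>(List.of("json", "properties", "json"));
        System.out.println(resolveType(Optional.of("xml"), types)); // xml; list untouched
        System.out.println(resolveType(Optional.empty(), types));   // json; list now [json, json]
    }
}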

From source file:com.ikanow.aleph2.analytics.services.AnalyticsContext.java

@Override
public IBucketLogger getLogger(Optional<DataBucketBean> bucket) {
    final DataBucketBean b = bucket.orElseGet(() -> _mutable_state.bucket.get());
    return _mutable_state.bucket_loggers.computeIfAbsent(b.full_name(), (k) -> _logging_service.getLogger(b));
}
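
This one-liner is the recurring idiom in AnalyticsContext: bucket.orElseGet(() -> _mutable_state.bucket.get()) defers reading the shared state until it is actually needed, so the state is never dereferenced when a bucket is passed in. A hedged sketch of the idiom, with a plain AtomicReference standing in for _mutable_state:

import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;

public class ContextStateDemo {

    // Stands in for _mutable_state.bucket in the examples on this page.
    private final AtomicReference<String> currentBucket = new AtomicReference<>();

    String resolveBucket(Optional<String> bucket) {
        // The supplier only runs when no bucket was passed, so an unset
        // AtomicReference is never touched as long as callers supply one.
        return bucket.orElseGet(currentBucket::get);
    }

    public static void main(String[] args) {
        ContextStateDemo ctx = new ContextStateDemo();
        System.out.println(ctx.resolveBucket(Optional.of("/alpha/beta"))); // /alpha/beta
        ctx.currentBucket.set("/default/bucket");
        System.out.println(ctx.resolveBucket(Optional.empty()));           // /default/bucket
    }
}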

From source file:org.silverpeas.core.contribution.attachment.repository.DocumentRepository.java

/**
 * Changes the version management of the document if the document is checked out. If the
 * document currently has version management, then all history is removed and the document
 * becomes a simple document with no version management. If the document has no version
 * management, then a new public version is created and the document becomes a document with
 * version history management.
 *
 * @param session the JCR session.
 * @param documentPk the id of the document.
 * @param comment the comment to attach to the version change.
 * @throws RepositoryException
 * @throws IOException
 */
public SimpleDocumentPK changeVersionState(Session session, SimpleDocumentPK documentPk, String comment)
        throws RepositoryException, IOException {
    try {
        Node documentNode = session.getNodeByIdentifier(documentPk.getId());
        boolean versionedNode = documentNode.getParent() instanceof Version
                || converter.isVersionedMaster(documentNode);
        Node parent = documentNode.getParent();
        if (parent instanceof Version) {
            Version selectedVersion = (Version) parent;
            VersionManager versionManager = documentNode.getSession().getWorkspace().getVersionManager();
            versionManager.restore(selectedVersion, true);
            documentNode = session
                    .getNodeByIdentifier(selectedVersion.getContainingHistory().getVersionableIdentifier());
        }
        if (!documentNode.isCheckedOut()) {
            checkoutNode(documentNode, null);
        }
        if (StringUtil.isDefined(comment)) {
            documentNode.setProperty(SLV_PROPERTY_COMMENT, comment);
        }
        final SimpleDocument origin = converter.fillDocument(documentNode, defaultLanguage);
        if (versionedNode) {
            removeHistory(documentNode);
            documentNode.removeMixin(MIX_SIMPLE_VERSIONABLE);
            documentNode.setProperty(SLV_PROPERTY_VERSIONED, false);
            documentNode.setProperty(SLV_PROPERTY_MAJOR, 0);
            documentNode.setProperty(SLV_PROPERTY_MINOR, 0);
            final SimpleDocument target = converter.fillDocument(documentNode, defaultLanguage);
            moveMultilangContent(origin, target);
            File currentDocumentDir = new File(target.getDirectoryPath(defaultLanguage)).getParentFile();
            final Optional<File[]> files = ofNullable(currentDocumentDir.getParentFile().listFiles());
            final File[] safeContents = files.orElseGet(() -> {
                SilverLogger.getLogger(this).warn(
                        "During version state removing, attempting to delete {0} which does not exist whereas JCR is having a reference on it",
                        currentDocumentDir.getParentFile().toString());
                return new File[0];
            });
            for (File versionDirectory : safeContents) {
                if (!versionDirectory.equals(currentDocumentDir)) {
                    FileUtils.deleteDirectory(versionDirectory);
                }
            }
        } else {
            documentNode.setProperty(SLV_PROPERTY_VERSIONED, true);
            documentNode.setProperty(SLV_PROPERTY_MAJOR, 1);
            documentNode.setProperty(SLV_PROPERTY_MINOR, 0);
            documentNode.addMixin(MIX_SIMPLE_VERSIONABLE);
            final SimpleDocument target = converter.fillDocument(documentNode, defaultLanguage);
            VersionManager versionManager = documentNode.getSession().getWorkspace().getVersionManager();
            documentNode.getSession().save();
            moveMultilangContent(origin, target);
            versionManager.checkin(documentNode.getPath());
        }
        return new SimpleDocumentPK(documentNode.getIdentifier(), documentPk);
    } catch (ItemNotFoundException infex) {
        return documentPk;
    }
}
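
The orElseGet in the middle of this method guards a classic trap: File.listFiles() returns null, not an empty array, when the directory is missing or unreadable. A self-contained sketch of that null guard (with the logger swapped for System.err):

import java.io.File;
import java.util.Optional;

public class SafeListFiles {

    static File[] listOrEmpty(File dir) {
        // listFiles() yields null for missing or unreadable directories;
        // orElseGet turns that into a logged, safely iterable empty array.
        return Optional.ofNullable(dir.listFiles()).orElseGet(() -> {
            System.err.println("Directory missing or unreadable: " + dir);
            return new File[0];
        });
    }

    public static void main(String[] args) {
        for (File f : listOrEmpty(new File("/no/such/dir"))) {
            System.out.println(f); // never reached; the loop is simply empty
        }
    }
}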

From source file:com.ikanow.aleph2.analytics.services.AnalyticsContext.java

@Override
public boolean checkForListeners(final Optional<DataBucketBean> bucket, final AnalyticThreadJobBean job) {

    final DataBucketBean this_bucket = bucket.orElseGet(() -> _mutable_state.bucket.get());
    final AnalyticThreadJobOutputBean output = Optional.ofNullable(job.output())
            .orElseGet(() -> BeanTemplateUtils.build(AnalyticThreadJobOutputBean.class).done().get());

    final String topic_name = Optional.ofNullable(output.is_transient()).orElse(false)
            ? _distributed_services.generateTopicName(this_bucket.full_name(), Optional.of(job.name()))
            : _distributed_services.generateTopicName(
                    Optional.ofNullable(output.sub_bucket_path()).orElse(this_bucket.full_name()),
                    ICoreDistributedServices.QUEUE_END_NAME);

    return _distributed_services.doesTopicExist(topic_name);
}
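
A rule of thumb is visible in this method: cheap constant defaults use orElse (the false for is_transient), while defaults that cost an allocation, like the built output bean, go through orElseGet. A small sketch of that split (the Output record is a hypothetical stand-in; assumes a recent JDK):

import java.util.Optional;

public class DefaultChoiceDemo {

    record Output(Boolean isTransient) {} // hypothetical stand-in for AnalyticThreadJobOutputBean

    static boolean isTransient(Output maybeNull) {
        // Allocation-bearing default: orElseGet builds the bean only when needed.
        Output out = Optional.ofNullable(maybeNull).orElseGet(() -> new Output(null));
        // Constant default: plain orElse is fine, there is nothing to defer.
        return Optional.ofNullable(out.isTransient()).orElse(false);
    }

    public static void main(String[] args) {
        System.out.println(isTransient(null));             // false
        System.out.println(isTransient(new Output(true))); // true
    }
}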

From source file:com.ikanow.aleph2.analytics.services.AnalyticsContext.java

@Override
public Optional<String> getOutputTopic(final Optional<DataBucketBean> bucket, final AnalyticThreadJobBean job) {
    final DataBucketBean this_bucket = bucket.orElseGet(() -> _mutable_state.bucket.get());
    final AnalyticThreadJobOutputBean output = Optional.ofNullable(job.output())
            .orElseGet(() -> BeanTemplateUtils.build(AnalyticThreadJobOutputBean.class).done().get());
    final boolean is_transient = Optional.ofNullable(output.is_transient()).orElse(false);

    if (_streaming_types
            .contains(Optional.ofNullable(output.transient_type()).orElse(MasterEnrichmentType.none))) {
        final String topic = is_transient
                ? _distributed_services.generateTopicName(this_bucket.full_name(), Optional.of(job.name()))
                : _distributed_services.generateTopicName(
                        Optional.ofNullable(output.sub_bucket_path()).orElse(this_bucket.full_name()),
                        ICoreDistributedServices.QUEUE_END_NAME);
        return Optional.of(topic);
    } else {
        return Optional.empty();
    }
}

From source file:com.ikanow.aleph2.analytics.services.AnalyticsContext.java

@Override
public CompletableFuture<DataBucketStatusBean> getBucketStatus(final Optional<DataBucketBean> bucket) {
    return this._core_management_db.readOnlyVersion().getDataBucketStatusStore()
            .getObjectById(bucket.orElseGet(() -> _mutable_state.bucket.get())._id())
            .thenApply(opt_status -> opt_status.get());
    // (ie will exception if not present)
}

From source file:org.ow2.proactive.connector.iaas.cloud.provider.vmware.VMWareProvider.java

private VirtualMachineRelocateSpec inferRelocateSpecsFromImageArgument(String image, Folder rootFolder) {

    Optional<ResourcePool> destinationPool = Optional.empty();
    Optional<HostSystem> destinationHost = Optional.empty();
    Optional<Datastore> destinationDatastore = Optional.empty();

    if (isMultiPartImage(image)) {
        String hostname = image.split(IMAGE_DELIMITER)[1];
        if (hostname.equals(RANDOM_HOST)) {
            destinationPool = vmWareProviderVirtualMachineUtil.getRandomResourcePool(rootFolder);
            if (destinationPool.isPresent()) {
                destinationDatastore = vmWareProviderVirtualMachineUtil
                        .getDatastoreWithMostSpaceFromPool(destinationPool.get());
            }
        } else {
            destinationPool = vmWareProviderVirtualMachineUtil.searchResourcePoolByHostname(hostname,
                    rootFolder);
            destinationHost = vmWareProviderVirtualMachineUtil.searchHostByName(hostname, rootFolder);
            if (destinationHost.isPresent()) {
                destinationDatastore = vmWareProviderVirtualMachineUtil
                        .getDatastoreWithMostSpaceFromHost(destinationHost.get());
            }
        }
    }

    VirtualMachine vmToClone = getVirtualMachineByNameOrUUID(getInstanceIdFromImage(image), rootFolder);

    return generateCustomRelocateSpecs(destinationPool.orElseGet(() -> {
        try {
            return vmToClone.getResourcePool();
        } catch (RemoteException e) {
            throw new RuntimeException("Unable to retrieve destination resource pool for VM:");
        }
    }), destinationHost.orElse(null), destinationDatastore.orElse(null));
}
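
Because Supplier.get() declares no checked exceptions, the RemoteException thrown by getResourcePool() has to be rethrown as an unchecked exception inside the supplier, as above. A minimal sketch of the wrapping pattern (the remote lookup is hypothetical):

import java.rmi.RemoteException;
import java.util.Optional;

public class CheckedSupplierDemo {

    // Hypothetical remote lookup that may fail with a checked exception.
    static String fetchRemoteDefault() throws RemoteException {
        return "remote-default";
    }

    static String resolve(Optional<String> explicit) {
        // Supplier.get() cannot throw checked exceptions, so the
        // RemoteException is wrapped in an unchecked RuntimeException.
        return explicit.orElseGet(() -> {
            try {
                return fetchRemoteDefault();
            } catch (RemoteException e) {
                throw new RuntimeException("Unable to retrieve remote default", e);
            }
        });
    }

    public static void main(String[] args) {
        System.out.println(resolve(Optional.empty())); // remote-default
    }
}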

From source file:com.ikanow.aleph2.analytics.services.AnalyticsContext.java

@Override
public List<String> getInputTopics(final Optional<DataBucketBean> bucket, final AnalyticThreadJobBean job,
        final AnalyticThreadJobInputBean job_input) {
    final DataBucketBean my_bucket = bucket.orElseGet(() -> _mutable_state.bucket.get());

    final AuthorizationBean auth_bean = new AuthorizationBean(my_bucket.owner_id());
    final ICrudService<DataBucketBean> secured_bucket_crud = _core_management_db.readOnlyVersion()
            .getDataBucketStore().secured(_service_context, auth_bean);

    return Optional.of(job_input).filter(i -> "stream".equalsIgnoreCase(i.data_service()))
            .map(Lambdas.wrap_u(i -> {
                //Topic naming: 5 cases:
                // 1) i.resource_name_or_id is a bucket path, ie starts with "/", and then:
                // 1.1) if it ends ":name" then it points to a specific point in the bucket processing
                // 1.2) if it ends ":" or ":$start" then it points to the start of that bucket's processing (ie the output of its harvester) .. which corresponds to the queue name with no sub-channel
                // 1.3) otherwise it points to the end of the bucket's processing (ie immediately before its output) ... which corresponds to the queue name with the sub-channel "$end"
                // 2) i.resource_name_or_id does not start with a "/", in which case:
                // 2.1) if it's a non-empty string, then it's the name of one of the internal jobs (can interpret that as this.full_name + name)
                // 2.2) if it's "" or null then it's pointing to the output of its own bucket's harvester

                final String[] bucket_subchannel = Lambdas.<String, String[]>wrap_u(s -> {
                    if (s.startsWith("/")) { //1.*
                        if (s.endsWith(":")) {
                            return new String[] { s.substring(0, s.length() - 1), "" }; // (1.2a)
                        } else {
                            final String[] b_sc = s.split(":");
                            if (1 == b_sc.length) {
                                return new String[] { b_sc[0], "$end" }; // (1.3)
                            } else if ("$start".equals(b_sc[1])) {
                                return new String[] { b_sc[0], "" }; // (1.2b)                           
                            } else {
                                return b_sc; //(1.1)
                            }
                        }
                    } else { //2.*
                        return new String[] { my_bucket.full_name(), s };
                    }
                }).apply(i.resource_name_or_id());

                // Check this bucket exists and I have read access to it
                if (!my_bucket.full_name().equals(bucket_subchannel[0])) {
                    boolean found_bucket = secured_bucket_crud.getObjectBySpec(
                            CrudUtils.allOf(DataBucketBean.class).when(DataBucketBean::full_name,
                                    bucket_subchannel[0]),
                            Collections.emptyList(), // (don't want any part of the bucket, just whether it exists or not)
                            true).get().isPresent();
                    if (!found_bucket) {
                        throw new RuntimeException(ErrorUtils.get(ErrorUtils.BUCKET_NOT_FOUND_OR_NOT_READABLE,
                                bucket_subchannel[0]));
                    }
                }

                final String topic = _distributed_services.generateTopicName(bucket_subchannel[0],
                        Optional.of(bucket_subchannel[1]).filter(s -> !s.isEmpty()));
                _distributed_services.createTopic(topic, Optional.empty());
                return topic;
            })).map(i -> Arrays.asList(i)).orElse(Collections.emptyList());
}
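
The five comment cases above are easiest to verify in isolation. A hedged sketch that pulls the parsing lambda out into a standalone method (the bucket paths are made up):

import java.util.Arrays;

public class SubchannelParseDemo {

    static String[] parse(String s, String myBucket) {
        if (s.startsWith("/")) {
            if (s.endsWith(":")) return new String[] { s.substring(0, s.length() - 1), "" }; // 1.2a
            String[] b_sc = s.split(":");
            if (1 == b_sc.length) return new String[] { b_sc[0], "$end" };                   // 1.3
            if ("$start".equals(b_sc[1])) return new String[] { b_sc[0], "" };               // 1.2b
            return b_sc;                                                                     // 1.1
        }
        return new String[] { myBucket, s };                                                 // 2.*
    }

    public static void main(String[] args) {
        String my = "/my/bucket";
        String[] inputs = { "/other/bucket:stage1", "/other/bucket:", "/other/bucket:$start",
                "/other/bucket", "job_a", "" };
        for (String in : inputs) {
            System.out.println("\"" + in + "\" -> " + Arrays.toString(parse(in, my)));
        }
    }
}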

From source file:com.ikanow.aleph2.analytics.services.AnalyticsContext.java

@Override
public Validation<BasicMessageBean, JsonNode> emitObject(final Optional<DataBucketBean> bucket,
        final AnalyticThreadJobBean job, final Either<JsonNode, Map<String, Object>> object,
        final Optional<AnnotationBean> annotations) {
    final DataBucketBean this_bucket = bucket.orElseGet(() -> _mutable_state.bucket.get());

    if (annotations.isPresent()) {
        throw new RuntimeException(ErrorUtils.get(ErrorUtils.NOT_YET_IMPLEMENTED, "annotations"));
    }
    final JsonNode obj_json = object.either(__ -> __,
            map -> (JsonNode) _mapper.convertValue(map, JsonNode.class));

    if (!this_bucket.full_name().equals(_mutable_state.bucket.get().full_name())) {
        return externalEmit(this_bucket, job, obj_json);
    }
    _mutable_state.has_unflushed_data = this._multi_writer.get().batchWrite(obj_json);

    final String topic = _distributed_services.generateTopicName(this_bucket.full_name(),
            ICoreDistributedServices.QUEUE_END_NAME);
    if (_distributed_services.doesTopicExist(topic)) {
        // (ie someone is listening in on our output data, so duplicate it for their benefit)
        _mutable_state.has_unflushed_data = true;
        _distributed_services.produce(topic, obj_json.toString());
    }
    //(else nothing to do)

    return Validation.success(obj_json);
}

From source file:com.ikanow.aleph2.analytics.services.AnalyticsContext.java

@Override
public List<String> getInputPaths(final Optional<DataBucketBean> bucket, final AnalyticThreadJobBean job,
        final AnalyticThreadJobInputBean job_input) {

    final DataBucketBean my_bucket = bucket.orElseGet(() -> _mutable_state.bucket.get());

    final AuthorizationBean auth_bean = new AuthorizationBean(my_bucket.owner_id());
    final ICrudService<DataBucketBean> secured_bucket_crud = _core_management_db.readOnlyVersion()
            .getDataBucketStore().secured(_service_context, auth_bean);

    return Optional.of(job_input).filter(i -> null != i.data_service())
            .filter(i -> "batch".equalsIgnoreCase(i.data_service())
                    || DataSchemaBean.StorageSchemaBean.name.equalsIgnoreCase(i.data_service()))
            .map(Lambdas.wrap_u(i -> {
                if ("batch".equalsIgnoreCase(i.data_service())) {
                    final String[] bucket_subchannel = Lambdas.<String, String[]>wrap_u(s -> {

                        // 1) If the resource starts with "/" then it must point to an intermediate batch result of an external bucket
                        // 2) Otherwise the resource names a job within this bucket (interpreted relative to my_bucket.full_name())

                        if (s.startsWith("/")) { //1.*
                            if (s.endsWith(":")) {
                                return new String[] { s.substring(0, s.length() - 1), "" }; // (1.2a)
                            } else {
                                final String[] b_sc = s.split(":");
                                if (1 == b_sc.length) {
                                    return new String[] { my_bucket.full_name(), "" };
                                } else {
                                    return b_sc; //(1.1)
                                }
                            }
                        } else { //2.*
                            return new String[] { my_bucket.full_name(), s };
                        }
                    }).apply(Optional.ofNullable(i.resource_name_or_id()).orElse(""));

                    final Optional<DataBucketBean> bucket_to_check = Lambdas.get(Lambdas.wrap_u(() -> {
                        if (bucket_subchannel[0].equals(my_bucket.full_name())) {
                            return Optional.of(my_bucket);
                        } else {
                            return secured_bucket_crud.getObjectBySpec(CrudUtils.allOf(DataBucketBean.class)
                                    .when(DataBucketBean::full_name, bucket_subchannel[0])).get();
                        }
                    }));
                    return Lambdas.get(() -> {
                        if (!bucket_subchannel[0].equals(my_bucket.full_name())
                                || !bucket_subchannel[1].isEmpty()) {
                            bucket_to_check.map(input_bucket -> input_bucket.analytic_thread())
                                    .flatMap(
                                            a_thread -> Optional.ofNullable(a_thread.jobs()))
                                    .flatMap(jobs -> jobs.stream()
                                            .filter(j -> bucket_subchannel[1].equals(j.name()))
                                            .filter(j -> _batch_types
                                                    .contains(Optionals.of(() -> j.output().transient_type())
                                                            .orElse(MasterEnrichmentType.none)))
                                            .filter(j -> Optionals.of(() -> j.output().is_transient())
                                                    .orElse(false))
                                            .findFirst())
                                    .orElseThrow(() -> new RuntimeException(ErrorUtils.get(
                                            ErrorUtils.INPUT_PATH_NOT_A_TRANSIENT_BATCH, my_bucket.full_name(),
                                            job.name(), bucket_subchannel[0], bucket_subchannel[1])));

                            return Arrays.asList(_storage_service.getBucketRootPath() + bucket_subchannel[0]
                                    + IStorageService.TRANSIENT_DATA_SUFFIX_SECONDARY + bucket_subchannel[1]
                                    + IStorageService.PRIMARY_BUFFER_SUFFIX + "**/*");
                        } else { // This is my input directory
                            return Arrays.asList(_storage_service.getBucketRootPath() + my_bucket.full_name()
                                    + IStorageService.TO_IMPORT_DATA_SUFFIX + "*");
                        }
                    });
                } else { // storage service ... 3 options :raw, :json, :processed (defaults to :processed)
                    if (Optional.of(true).equals(
                            Optional.ofNullable(i.config()).map(cfg -> cfg.high_granularity_filter()))) {
                        throw new RuntimeException(ErrorUtils.get(
                                ErrorUtils.HIGH_GRANULARITY_FILTER_NOT_SUPPORTED, my_bucket.full_name(),
                                job.name(), Optional.ofNullable(i.name()).orElse("(no name)")));
                    }

                    final String bucket_name = i.resource_name_or_id().split(":")[0];

                    // Check we have authentication for this bucket:

                    final boolean found_bucket = secured_bucket_crud
                            .getObjectBySpec(
                                    CrudUtils.allOf(DataBucketBean.class).when(DataBucketBean::full_name,
                                            bucket_name),
                                    Collections.emptyList(), // (don't want any part of the bucket, just whether it exists or not)
                                    true)
                            .get().isPresent();

                    if (!found_bucket) {
                        throw new RuntimeException(
                                ErrorUtils.get(ErrorUtils.BUCKET_NOT_FOUND_OR_NOT_READABLE, bucket_name));
                    }
                    final String sub_service = Patterns.match(i.resource_name_or_id()).<String>andReturn()
                            .when(s -> s.endsWith(":raw"), __ -> "raw/current/") // (input paths are always from primary)
                            .when(s -> s.endsWith(":json"), __ -> "json/current/")
                            .otherwise(__ -> "processed/current/");

                    final String base_path = _storage_service.getBucketRootPath() + bucket_name
                            + IStorageService.STORED_DATA_SUFFIX + sub_service;
                    return Optional.ofNullable(i.config())
                            .filter(cfg -> (null != cfg.time_min()) || (null != cfg.time_max())).map(cfg -> {
                                try {
                                    final FileContext fc = _storage_service
                                            .getUnderlyingPlatformDriver(FileContext.class, Optional.empty())
                                            .get();

                                    //DEBUG
                                    //_logger.warn("Found1: " + Arrays.stream(fc.util().listStatus(new Path(base_path))).map(f -> f.getPath().toString()).collect(Collectors.joining(";")));                                                            
                                    //_logger.warn("Found2: " + TimeSliceDirUtils.annotateTimedDirectories(tmp_paths).map(t -> t.toString()).collect(Collectors.joining(";")));
                                    //_logger.warn("Found3: " + TimeSliceDirUtils.getQueryTimeRange(cfg, new Date()));

                                    final Stream<String> paths = Arrays
                                            .stream(fc.util().listStatus(new Path(base_path)))
                                            .filter(f -> f.isDirectory())
                                            .map(f -> f.getPath().toUri().getPath()) // (remove the hdfs:// bit, which seems to be breaking with HA)
                                    ;

                                    return TimeSliceDirUtils
                                            .filterTimedDirectories(
                                                    TimeSliceDirUtils.annotateTimedDirectories(paths),
                                                    TimeSliceDirUtils.getQueryTimeRange(cfg, new Date()))
                                            .map(s -> s + "/*").collect(Collectors.toList());
                                } catch (Exception e) {
                                    return null;
                                } // will fall through to...
                            }).orElseGet(() -> {
                                // No time based filtering possible
                                final String suffix = "**/*";
                                return Arrays.asList(base_path + suffix);
                            });
                }
            })).orElse(Collections.emptyList());

}
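
One subtlety in the time-filtered branch above: the map(...) lambda returns null when listing or filtering fails, which makes the Optional empty and lets control fall through to the orElseGet default, exactly as the "will fall through to..." comment says. A compact sketch of that null-as-fallthrough pattern (paths and filter values are hypothetical):

import java.util.List;
import java.util.Optional;

public class FallThroughDemo {

    static List<String> paths(Optional<String> timeFilter, String basePath) {
        // Returning null from map() yields an empty Optional, so failures in
        // the filtered branch fall through to the orElseGet default below.
        return timeFilter.map(f -> {
            try {
                if (f.isEmpty()) throw new IllegalStateException("bad filter");
                return List.of(basePath + f + "/*");
            } catch (Exception e) {
                return null; // fall through to the unfiltered default
            }
        }).orElseGet(() -> List.of(basePath + "**/*"));
    }

    public static void main(String[] args) {
        System.out.println(paths(Optional.of("2024"), "/data/")); // [/data/2024/*]
        System.out.println(paths(Optional.of(""), "/data/"));     // [/data/**/*]
        System.out.println(paths(Optional.empty(), "/data/"));    // [/data/**/*]
    }
}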