Example usage for java.util.stream Collectors joining

List of usage examples for java.util.stream Collectors joining

Introduction

On this page you can find example usage for java.util.stream Collectors.joining.

Prototype

public static Collector<CharSequence, ?, String> joining(CharSequence delimiter) 

Document

Returns a Collector that concatenates the input elements, separated by the specified delimiter, in encounter order.
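
For reference, here is a minimal, self-contained sketch (the class name and sample data are hypothetical) of how joining(CharSequence delimiter) concatenates stream elements in encounter order:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class JoiningDelimiterSketch {
    public static void main(String[] args) {
        final List<String> words = Arrays.asList("alpha", "beta", "gamma");
        // Concatenate the elements, separated by ", ", in encounter order
        final String joined = words.stream().collect(Collectors.joining(", "));
        System.out.println(joined); // prints: alpha, beta, gamma
    }
}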

Usage

From source file:com.blackducksoftware.integration.hub.detect.help.DetectOption.java
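
Here, Collectors.joining(", ") turns the option's list of valid values into a single comma-separated string for the generated HTML help entry.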

public HelpHtmlOption createHtmlOption() {
    final String description = getDetectOptionHelp().description;
    String acceptableValues = "";
    if (getValidValues().size() > 0) {
        acceptableValues = getValidValues().stream().collect(Collectors.joining(", "));
    }
    String deprecationNotice = "";
    if (getDetectOptionHelp().isDeprecated) {
        deprecationNotice = getDeprecationText() + getDetectOptionHelp().deprecation;
    }
    String propertyKey = "";
    String defaultValue = "";
    if (StringUtils.isNotBlank(detectProperty.getPropertyKey())) {
        propertyKey = detectProperty.getPropertyKey();
    }
    if (StringUtils.isNotBlank(detectProperty.getDefaultValue())) {
        defaultValue = detectProperty.getDefaultValue();
    }

    final HelpHtmlOption htmlOption = new HelpHtmlOption(propertyKey, defaultValue, description,
            acceptableValues, getDetectOptionHelp().detailedHelp, deprecationNotice);
    return htmlOption;
}

From source file:com.ikanow.aleph2.data_import_manager.analytics.utils.TestAnalyticTriggerCrudUtils.java
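
In this test, Collectors.joining("\n") is used only for debug output, printing the JSON form of each generated trigger state bean on its own line.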

@Test
public void test_storeOrUpdateTriggerStage_updateActivation() throws InterruptedException {
    assertEquals(0, _test_crud.countObjects().join().intValue());

    final DataBucketBean bucket = buildBucket("/test/store/trigger", true);

    // Save a bucket
    {
        final Stream<AnalyticTriggerStateBean> test_stream = AnalyticTriggerBeanUtils
                .generateTriggerStateStream(bucket, false, Optional.empty());
        final List<AnalyticTriggerStateBean> test_list = test_stream.collect(Collectors.toList());

        System.out.println("Resources = \n" + test_list.stream()
                .map(t -> BeanTemplateUtils.toJson(t).toString()).collect(Collectors.joining("\n")));

        assertEquals(8L, test_list.size()); //(8 not 7 cos haven't dedup'd yet)

        // 4 internal dependencies
        assertEquals(4L, test_list.stream().filter(t -> null != t.job_name()).count());
        // 4 external dependencies
        assertEquals(4L, test_list.stream().filter(t -> null == t.job_name()).count());

        final Map<Tuple2<String, String>, List<AnalyticTriggerStateBean>> grouped_triggers = test_list.stream()
                .collect(Collectors.groupingBy(t -> Tuples._2T(t.bucket_name(), null)));

        AnalyticTriggerCrudUtils.storeOrUpdateTriggerStage(bucket, _test_crud, grouped_triggers).join();

        assertEquals(7L, _test_crud.countObjects().join().intValue());

        // Time is relative (default bucket check freq == 2 minutes), so all the triggers should have been set for "now"
        assertEquals(7L,
                _test_crud
                        .countObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class)
                                .rangeBelow(AnalyticTriggerStateBean::next_check, new Date(), false))
                        .join().intValue());

    }

    //DEBUG
    //this.printTriggerDatabase();

    // Sleep to change times
    Thread.sleep(100L);

    // 2) Modify and update
    final DataBucketBean mod_bucket = BeanTemplateUtils.clone(bucket)
            .with(DataBucketBean::analytic_thread,
                    BeanTemplateUtils.clone(bucket.analytic_thread())
                            .with(AnalyticThreadBean::jobs, bucket.analytic_thread().jobs().stream()
                                    .map(j -> BeanTemplateUtils.clone(j)
                                            .with(AnalyticThreadJobBean::name, "test_" + j.name()).done())
                                    .collect(Collectors.toList()))
                            .done())
            .done();
    {

        final Stream<AnalyticTriggerStateBean> test_stream = AnalyticTriggerBeanUtils
                .generateTriggerStateStream(mod_bucket, false, Optional.empty());
        final List<AnalyticTriggerStateBean> test_list = test_stream.collect(Collectors.toList());

        final Map<Tuple2<String, String>, List<AnalyticTriggerStateBean>> grouped_triggers = test_list.stream()
                .collect(Collectors.groupingBy(t -> Tuples._2T(t.bucket_name(), null)));

        AnalyticTriggerCrudUtils.storeOrUpdateTriggerStage(bucket, _test_crud, grouped_triggers).join();

        //DEBUG
        //this.printTriggerDatabase();

        assertEquals(7L, _test_crud.countObjects().join().intValue());

        assertEquals(4L,
                Optionals
                        .streamOf(_test_crud.getObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class))
                                .join().iterator(), false)
                        .filter(t -> null != t.job_name()).filter(t -> t.job_name().startsWith("test_"))
                        .count());
    }

    // 3) Since we're here might as well try activating...
    {
        final Stream<AnalyticTriggerStateBean> test_stream = Optionals.streamOf(
                _test_crud.getObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class)).join().iterator(),
                false);

        AnalyticTriggerCrudUtils.updateTriggerStatuses(_test_crud, test_stream, new Date(), Optional.of(true))
                .join();

        assertEquals(7L, _test_crud.countObjects().join().intValue());
        assertEquals(7L,
                Optionals
                        .streamOf(_test_crud.getObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class))
                                .join().iterator(), false)
                        .filter(t -> t.is_job_active())
                        .filter(t -> 100 != Optional.ofNullable(t.last_resource_size()).orElse(-1L)).count());
    }
    // 4) ... and then de-activating...
    {
        final Stream<AnalyticTriggerStateBean> test_stream = Optionals.streamOf(
                _test_crud.getObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class)).join().iterator(),
                false);

        AnalyticTriggerCrudUtils.updateTriggerStatuses(_test_crud, test_stream, new Date(), Optional.of(false))
                .join();

        assertEquals(7L, _test_crud.countObjects().join().intValue());
        assertEquals(7L,
                Optionals
                        .streamOf(_test_crud.getObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class))
                                .join().iterator(), false)
                        .filter(t -> !t.is_job_active())
                        .filter(t -> 100 != Optional.ofNullable(t.last_resource_size()).orElse(-1L)).count());
    }
    // 5) ... finally re-activate 
    {
        final Stream<AnalyticTriggerStateBean> test_stream = Optionals
                .streamOf(_test_crud.getObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class)).join()
                        .iterator(), false)
                .map(t -> BeanTemplateUtils.clone(t).with(AnalyticTriggerStateBean::curr_resource_size, 100L)
                        .done());

        AnalyticTriggerCrudUtils.updateTriggerStatuses(_test_crud, test_stream, new Date(), Optional.of(true))
                .join();

        assertEquals(7L, _test_crud.countObjects().join().intValue());
        assertEquals(7L,
                Optionals
                        .streamOf(_test_crud.getObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class))
                                .join().iterator(), false)
                        .filter(t -> t.is_job_active()).filter(t -> 100 == t.last_resource_size()).count());

    }
}

From source file:com.ikanow.aleph2.analytics.services.TestAnalyticsContext_FileSystemChecks.java
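
Here, Collectors.joining(";") concatenates the resolved input paths into the failure message of the assertion on timed storage slices.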

@Test
public void test_storageService_timedInputPaths() throws InterruptedException, ExecutionException {

    final AnalyticsContext test_context = _app_injector.getInstance(AnalyticsContext.class);

    File f = new File(_service_context.getStorageService().getBucketRootPath() + "/this_bucket"
            + IStorageService.STORED_DATA_SUFFIX_PROCESSED);
    FileUtils.deleteQuietly(f);

    final AnalyticThreadJobBean.AnalyticThreadJobInputBean analytic_input1 = BeanTemplateUtils
            .build(AnalyticThreadJobBean.AnalyticThreadJobInputBean.class)
            .with(AnalyticThreadJobBean.AnalyticThreadJobInputBean::data_service, "storage_service")
            .with(AnalyticThreadJobBean.AnalyticThreadJobInputBean::resource_name_or_id, "/this_bucket") //(just avoids DB check)
            .with(AnalyticThreadJobBean.AnalyticThreadJobInputBean::config,
                    BeanTemplateUtils.build(AnalyticThreadJobBean.AnalyticThreadJobInputConfigBean.class)
                            .with(AnalyticThreadJobBean.AnalyticThreadJobInputConfigBean::time_min, "1 year")
                            .done().get())
            .done().get();

    final AnalyticThreadJobBean analytic_job1 = BeanTemplateUtils.build(AnalyticThreadJobBean.class)
            .with(AnalyticThreadJobBean::name, "test_name1")
            .with(AnalyticThreadJobBean::analytic_technology_name_or_id, "test_analytic_tech_id")
            .with(AnalyticThreadJobBean::inputs, Arrays.asList(analytic_input1))
            .with(AnalyticThreadJobBean::library_names_or_ids, Arrays.asList("id1", "name2")).done().get();

    final DataBucketBean test_bucket = BeanTemplateUtils.build(DataBucketBean.class)
            .with(DataBucketBean::_id, "this_bucket").with(DataBucketBean::full_name, "/this_bucket")
            .with(DataBucketBean::analytic_thread,
                    BeanTemplateUtils.build(AnalyticThreadBean.class)
                            .with(AnalyticThreadBean::jobs, Arrays.asList(analytic_job1)).done().get())
            .done().get();

    test_context._service_context.getService(IManagementDbService.class, Optional.empty()).get()
            .getDataBucketStore().storeObject(test_bucket).get();

    // Check falls back to current if dirs don't exist

    assertTrue("Falls back to full storage",
            test_context.getInputPaths(Optional.of(test_bucket), analytic_job1, analytic_input1).get(0)
                    .endsWith("/this_bucket/managed_bucket/import/stored/processed/current/**/*"));

    @SuppressWarnings("deprecation")
    final int year = 1900 + new Date().getYear();

    createDirs(f,
            Arrays.asList("test_" + (year - 3), "test_" + (year - 2), "test_" + (year - 1), "test_" + (year)));

    final List<String> res = test_context.getInputPaths(Optional.of(test_bucket), analytic_job1,
            analytic_input1);

    assertEquals("Timed slices: " + res.stream().collect(Collectors.joining(";")),
            Arrays.asList("/current/test_" + (year - 1) + "/*", "/current/test_" + year + "/*"),
            res.stream().map(s -> s.substring(s.indexOf("/current/"))).sorted().collect(Collectors.toList()));

    // Check high granularity mode is disabled:

    try {
        final AnalyticThreadJobBean.AnalyticThreadJobInputBean analytic_input_fail = BeanTemplateUtils
                .clone(analytic_input1)
                .with(AnalyticThreadJobBean.AnalyticThreadJobInputBean::config, BeanTemplateUtils
                        .build(AnalyticThreadJobBean.AnalyticThreadJobInputConfigBean.class)
                        .with(AnalyticThreadJobBean.AnalyticThreadJobInputConfigBean::high_granularity_filter,
                                true)
                        .done().get())
                .done();

        final AnalyticThreadJobBean analytic_job_fail = BeanTemplateUtils.clone(analytic_job1)
                .with(AnalyticThreadJobBean::inputs, Arrays.asList(analytic_input_fail)).done();

        final DataBucketBean test_bucket_fail = BeanTemplateUtils.clone(test_bucket)
                .with(DataBucketBean::analytic_thread,
                        BeanTemplateUtils.build(AnalyticThreadBean.class)
                                .with(AnalyticThreadBean::jobs, Arrays.asList(analytic_job_fail)).done().get())
                .done();

        test_context.getInputPaths(Optional.of(test_bucket_fail), analytic_job_fail, analytic_input_fail);
        fail("Should have thrown error");
    } catch (Exception e) {
        //we're good
    }
}

From source file:com.firewallid.termcloud.TermCloud.java
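
This example joins the top-N feature-value pairs with System.lineSeparator(), producing one pair per line before the text is written to HDFS.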

public void saveTermCloudAll(JavaPairRDD<String, List<Tuple2<String, Double>>> doc, String fileNamePrefix)
        throws IOException {
    List<Tuple2<String, List<Tuple2<String, Double>>>> collectDoc = doc.collect();

    if (collectDoc.isEmpty()) {
        return;
    }

    /* Reduced feature-value list */
    List<Tuple2<String, Double>> featureValueList = collectDoc.parallelStream()
            .map(titleFeatures -> titleFeatures._2).reduce((featureValueList1, featureValueList2) -> {
                List<Tuple2<String, Double>> combineList = FIUtils.combineList(featureValueList1,
                        featureValueList2);

                List<Tuple2<String, Double>> collect = combineList.parallelStream()
                        .collect(Collectors
                                .groupingBy(t -> t._1, Collectors.mapping(t -> t._2, Collectors.toList())))
                        .entrySet().parallelStream()
                        .map(t -> new Tuple2<String, Double>(t.getKey(),
                                t.getValue().parallelStream().mapToDouble(Double::doubleValue).sum()))
                        .collect(Collectors.toList());

                return collect;
            }).get();

    /* Sorting */
    List<Tuple2<String, Double>> featureValueListSorted = FIUtils.sortDescTupleListByValue(featureValueList);

    /* Top N */
    List<Tuple2<String, Double>> featureValueListTopN;
    if (featureValueListSorted.size() <= conf.getInt(TOPN, 100)) {
        featureValueListTopN = new ArrayList<>(featureValueListSorted);
    } else {
        featureValueListTopN = new ArrayList<>(featureValueListSorted.subList(0, conf.getInt(TOPN, 100)));
    }

    /* Text for file. One line, one feature-value pair */
    String featureValueText = featureValueListTopN.parallelStream()
            .map(feature -> feature._1 + StringEscapeUtils.unescapeJava(conf.get(LINE_DELIMITER)) + feature._2)
            .collect(Collectors.joining(System.lineSeparator()));

    /* Save to file */
    FIFile.writeStringToHDFSFile(FIFile.generateFullPath(conf.get(TERMCLOUD_FOLDER),
            createFileNameTermCloud(fileNamePrefix, conf.get(ALLNAME))), featureValueText);
}

From source file:com.evolveum.midpoint.model.intest.manual.CsvBackingStore.java
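
A compact example: each value is wrapped in quotes, then the values are joined with commas to form a single CSV line.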

private String formatCsvLine(String[] data) {
    return Arrays.stream(data).map(s -> "\"" + s + "\"").collect(Collectors.joining(",")) + "\n";
}

From source file:io.soabase.halva.processor.caseclass.Templates.java
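
Here the accessor calls are joined with ", " to form the argument list of the generated Tuple.Tu(...) statement.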

void addTuple(CaseClassSpec spec, TypeSpec.Builder builder) {
    Optional<Class<? extends Tuple>> optionalTupleClass = Tuple.getTupleClass(spec.getItems().size());
    boolean hasThisTuple = optionalTupleClass.isPresent();
    Class<? extends Tuple> tupleClass = optionalTupleClass.orElse(Tuple.class);

    TypeName typeName;
    CodeBlock codeBlock;
    if (hasThisTuple) {
        List<TypeName> typeNameList = spec.getItems().stream()
                .map(item -> environment.getGeneratedManager().toTypeName(item.getType()).box())
                .collect(Collectors.toList());
        typeName = getTupleType(tupleClass, typeNameList);

        String args = spec.getItems().stream().map(item -> item.getName() + "()")
                .collect(Collectors.joining(", "));

        codeBlock = CodeBlock.builder().addStatement("return $T.Tu($L)", Tuple.class, args).build();
    } else {
        typeName = ClassName.get(tupleClass);

        codeBlock = CodeBlock.builder().addStatement("throw new $T($S)", UnsupportedOperationException.class,
                "Too many arguments for a Tuple").build();
    }

    MethodSpec methodSpec = MethodSpec.methodBuilder("tuple").returns(typeName).addAnnotation(Override.class)
            .addModifiers(Modifier.PUBLIC).addCode(codeBlock).build();
    builder.addMethod(methodSpec);
}

From source file:com.microsoft.azure.hdinsight.spark.run.SparkBatchJobDebuggerRunner.java
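
In the error handler, the messages of a CompositeException are joined with "; " into a single error string for the submission message window.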

@Override
protected void execute(@NotNull ExecutionEnvironment environment, @Nullable Callback callback,
        @NotNull RunProfileState state) throws ExecutionException {
    SparkBatchJobSubmissionState submissionState = (SparkBatchJobSubmissionState) state;
    SparkSubmitModel submitModel = submissionState.getSubmitModel();
    SparkSubmissionParameter submissionParameter = submitModel.getSubmissionParameter();
    IClusterDetail clusterDetail = submitModel.getSelectedClusterDetail();
    Map<String, String> postEventProperty = new HashMap<>();

    submitModel
            .buildArtifactObservable(submissionParameter.getArtifactName()).flatMap((artifact) -> submitModel
                    .deployArtifactObservable(artifact, clusterDetail).subscribeOn(Schedulers.io()))
            .map((selectedClusterDetail) -> {
                // Create Batch Spark Debug Job
                try {
                    return submitModel.tryToCreateBatchSparkDebugJob(selectedClusterDetail);
                } catch (Exception e) {
                    HDInsightUtil.setJobRunningStatus(submitModel.getProject(), false);
                    throw Exceptions.propagate(e);
                }
            })
            .flatMap(
                    (remoteDebugJob) -> startDebuggerObservable(
                            environment, callback, submissionState, remoteDebugJob)
                                    .subscribeOn(Schedulers.computation()).zipWith( // Block with getting the job log from cluster
                                            submitModel
                                                    .jobLogObservable(remoteDebugJob.getBatchId(),
                                                            clusterDetail)
                                                    .subscribeOn(Schedulers.computation()),
                                            (session, ignore) -> session)
                                    .doOnError(err -> {
                                        try {
                                            HDInsightUtil.showErrorMessageOnSubmissionMessageWindow(
                                                    submitModel.getProject(),
                                                    "Error : Spark batch debugging job is killed, got exception "
                                                            + err);

                                            remoteDebugJob.killBatchJob();
                                            HDInsightUtil.setJobRunningStatus(submitModel.getProject(), false);
                                        } catch (IOException ignore) {
                                        }
                                    }))
            .subscribe(sparkBatchDebugSession -> {
                // Spark Job is done
                HDInsightUtil.showInfoOnSubmissionMessageWindow(submitModel.getProject(),
                        "Info : Debugging Spark batch job in cluster is done.");

                sparkBatchDebugSession.close();

                HDInsightUtil.setJobRunningStatus(submitModel.getProject(), false);

                postEventProperty.put("IsSubmitSucceed", "true");
                AppInsightsClient.create(HDInsightBundle.message("SparkRunConfigDebugButtonClick"), null,
                        postEventProperty);
            }, (throwable) -> {
                // set the running flag to false
                HDInsightUtil.setJobRunningStatus(submitModel.getProject(), false);

                String errorMessage;

                if (throwable instanceof CompositeException) {
                    CompositeException exceptions = (CompositeException) throwable;

                    errorMessage = exceptions.getExceptions().stream().map(Throwable::getMessage)
                            .collect(Collectors.joining("; "));
                } else {
                    errorMessage = throwable.getMessage();
                }

                HDInsightUtil.showErrorMessageOnSubmissionMessageWindow(submitModel.getProject(),
                        "Error : Spark batch Job remote debug failed, got exception: " + errorMessage);

                postEventProperty.put("IsSubmitSucceed", "false");
                postEventProperty.put("SubmitFailedReason", errorMessage.substring(0, 50));
                AppInsightsClient.create(HDInsightBundle.message("SparkRunConfigDebugButtonClick"), null,
                        postEventProperty);
            });
}

From source file:com.ikanow.aleph2.example.flume_harvester.utils.FlumeUtils.java
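
This example joins the generated source names (file_in_1, file_in_2, ...) with a single space to build the Flume "sources" property.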

/** Auto-generates the flume config from an input block
 *  If it's in test mode it also deletes the trackerDir (so this can be used for purging)
 * @param bucket_config
 * @param morphlines_config_path
 * @param test_mode
 * @return
 */
public static FlumeBucketConfigBean createAutoFlumeConfig(final DataBucketBean bucket,
        final FlumeBucketConfigBean bucket_config, final boolean test_mode) {
    //TODO (ALEPH-10): eventually add support for additional shortcuts here
    //TODO (ALEPH-10): security

    final Collection<SpoolDirConfig> dirs = getSpoolDirs(bucket_config);
    final AtomicInteger counter = new AtomicInteger(0);

    if (!dirs.isEmpty()) {
        final ImmutableMap<String, String> new_flume_builder = dirs.stream()
                .reduce(ImmutableMap.<String, String>builder()
                        // defaults
                        .put("channels", "mem").put("channels:mem:capacity", "1000")
                        .put("channels:mem:transactionCapacity", "100").put("channels:mem:type", "memory"),
                        (acc, v) -> {
                            final int count = counter.incrementAndGet();

                            // (some tidy up that occurs in test mode)
                            return Optional.<ImmutableMap.Builder<String, String>>of(acc
                                    .put("sources:file_in_" + count + ":type", "spooldir")
                                    .put("sources:file_in_" + count + ":channels", "mem")
                                    .put("sources:file_in_" + count + ":trackerDir",
                                            getTrackingDirSuffix(bucket))
                                    .put("sources:file_in_" + count + ":deletePolicy",
                                            (v.delete_on_ingest() ? "immediate" : "never"))
                                    .put("sources:file_in_" + count + ":spoolDir",
                                            test_mode ? v.path() + "/" + getTestDirSuffix(bucket) : v.path())
                                    .put("sources:file_in_" + count + ":ignorePattern",
                                            Optional.ofNullable(v.ignore_pattern()).orElse("^$")))
                                    // Some optional fields
                                    .map(acc2 -> {
                                        return Optional.ofNullable(v.append_basename_field()).map(field -> acc2
                                                .put("sources:file_in_" + count + ":basenameHeader", "true")
                                                .put("sources:file_in_" + count + ":basenameHeaderKey", field))
                                                .orElse(acc);
                                    }).map(acc2 -> {
                                        return Optional.ofNullable(v.append_path_field()).map(field -> acc2
                                                .put("sources:file_in_" + count + ":fileHeader", "true")
                                                .put("sources:file_in_" + count + ":fileHeaderKey", field))
                                                .orElse(acc);
                                    }).get();
                        }, (acc1, acc2) -> acc1 // (can't happen in practice)   
                ).put("sources", StreamUtils.zipWithIndex(dirs.stream())
                        .map(i -> ("file_in_" + (1 + i.getIndex()))).collect(Collectors.joining(" ")))
                .build();
        ;

        // Clone the config with the new flume config
        return BeanTemplateUtils.clone(bucket_config)
                .with(FlumeBucketConfigBean::flume_config, new_flume_builder).done();
    } else { // Leave unchanged
        return bucket_config;
    }
}

From source file:integration.MessagingApplicationTests.java
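
Here every collected span is rendered via Span::toString and the results are joined with newlines for a single log statement.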

private void thenAllSpansArePresent(Optional<Span> firstHttpSpan, List<Span> eventSpans,
        Optional<Span> lastHttpSpan, Optional<Span> eventSentSpan, Optional<Span> eventReceivedSpan) {
    log.info("Found following spans");
    log.info("First http span " + firstHttpSpan);
    log.info("Event spans " + eventSpans);
    log.info("Event sent span " + eventSentSpan);
    log.info("Event received span " + eventReceivedSpan);
    log.info("Last http span " + lastHttpSpan);
    log.info("All found spans \n" + this.integrationTestSpanCollector.hashedSpans.stream().map(Span::toString)
            .collect(Collectors.joining("\n")));
    then(firstHttpSpan.isPresent()).isTrue();
    then(eventSpans).isNotEmpty();
    then(eventSentSpan.isPresent()).isTrue();
    then(eventReceivedSpan.isPresent()).isTrue();
    then(lastHttpSpan.isPresent()).isTrue();
}

From source file:co.runrightfast.vertx.orientdb.config.OAutomaticBackupConfig.java
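
The database include and exclude lists are joined with commas to form the db.include and db.exclude parameter values of the backup handler.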

@Override
public OServerHandlerConfiguration get() {
    final OServerHandlerConfiguration config = new OServerHandlerConfiguration();
    config.clazz = OAutomaticBackup.class.getName();
    if (!enabled) {
        config.parameters = new OServerParameterConfiguration[] {
                new OServerParameterConfiguration("enabled", FALSE.toString()) };
    } else {
        config.parameters = new OServerParameterConfiguration[] {
                new OServerParameterConfiguration("enabled", TRUE.toString()),
                new OServerParameterConfiguration("delay", delay.toDelaySetting()),
                new OServerParameterConfiguration("backup", backupDir.toAbsolutePath().toString()),
                new OServerParameterConfiguration("target.fileName", "${DBNAME}-${DATE:yyyyMMddHHmmss}.zip"),
                new OServerParameterConfiguration("compressionLevel", Integer.toString(compressionLevel)),
                new OServerParameterConfiguration("bufferSize", Integer.toString(bufferSizeMB * 1024 * 1000)),
                new OServerParameterConfiguration("db.include",
                        databaseIncludes.stream().collect(Collectors.joining(","))),
                new OServerParameterConfiguration("db.exclude",
                        databaseExcludes.stream().collect(Collectors.joining(","))) };

    }

    return config;
}