List of usage examples for java.util.stream.Collectors.joining
public static Collector<CharSequence, ?, String> joining(CharSequence delimiter)
Returns a Collector that concatenates the input elements, separated by the specified delimiter, in encounter order.
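Before the project-level examples below, here is a minimal self-contained sketch of the single-argument overload; the class name and sample values are illustrative only.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class JoiningExample {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("alpha", "beta", "gamma"); // hypothetical sample data
        // Concatenate the stream elements in encounter order, separated by ", "
        String joined = words.stream().collect(Collectors.joining(", "));
        System.out.println(joined); // prints: alpha, beta, gamma
    }
}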
From source file:com.evolveum.midpoint.web.page.admin.reports.dto.AuditEventRecordProvider.java
private String generateFullQuery(Map<String, Object> parameters, boolean ordered, boolean isCount) {
    String query = auditEventQuery;
    boolean filteredOnChangedItem = parameters.get(PARAMETER_CHANGED_ITEM) != null;
    boolean filteredOnValueRefTargetNames = filteredOnValueRefTargetNames(parameters);
    List<String> conditions = new ArrayList<>();

    if (parameters.get(PARAMETER_FROM) != null) { conditions.add("aer.timestamp >= :from"); } else { parameters.remove(PARAMETER_FROM); }
    if (parameters.get(PARAMETER_TO) != null) { conditions.add("aer.timestamp <= :to"); } else { parameters.remove(PARAMETER_TO); }
    if (parameters.get(PARAMETER_EVENT_TYPE) != null) { conditions.add("aer.eventType = :eventType"); } else { parameters.remove(PARAMETER_EVENT_TYPE); }
    if (parameters.get(PARAMETER_EVENT_STAGE) != null) { conditions.add("aer.eventStage = :eventStage"); } else { parameters.remove(PARAMETER_EVENT_STAGE); }

    Object outcomeValue = parameters.get(PARAMETER_OUTCOME);
    if (outcomeValue != null) {
        if (outcomeValue != OperationResultStatusType.UNKNOWN) {
            conditions.add("aer.outcome = :outcome");
        } else {
            // this is a bit questionable; but let us do it in this way to ensure compliance with GUI (null is shown as UNKNOWN)
            // see MID-3903
            conditions.add("(aer.outcome = :outcome or aer.outcome is null)");
        }
    } else {
        parameters.remove(PARAMETER_OUTCOME);
    }

    if (parameters.get(PARAMETER_INITIATOR_NAME) != null) { conditions.add("aer.initiatorOid = :initiatorName"); } else { parameters.remove(PARAMETER_INITIATOR_NAME); }
    if (parameters.get(PARAMETER_CHANNEL) != null) { conditions.add("aer.channel = :channel"); } else { parameters.remove(PARAMETER_CHANNEL); }
    if (parameters.get(PARAMETER_HOST_IDENTIFIER) != null) { conditions.add("aer.hostIdentifier = :hostIdentifier"); } else { parameters.remove(PARAMETER_HOST_IDENTIFIER); }
    if (parameters.get(PARAMETER_TARGET_OWNER_NAME) != null) { conditions.add("aer.targetOwnerOid = :targetOwnerName"); } else { parameters.remove(PARAMETER_TARGET_OWNER_NAME); }
    if (parameters.get(PARAMETER_TARGET_NAMES) != null) { conditions.add("aer.targetOid in ( :targetNames )"); } else { parameters.remove(PARAMETER_TARGET_NAMES); }
    if (parameters.get(PARAMETER_TASK_IDENTIFIER) != null) { conditions.add("aer.taskIdentifier = :taskIdentifier"); } else { parameters.remove(PARAMETER_TASK_IDENTIFIER); }
    if (filteredOnChangedItem) { conditions.add("item.changedItemPath = :changedItem"); } else { parameters.remove(PARAMETER_CHANGED_ITEM); }
    if (filteredOnValueRefTargetNames) { conditions.add("rv.targetName.orig in ( :valueRefTargetNames )"); } else { parameters.remove(PARAMETER_VALUE_REF_TARGET_NAMES); }

    if (query == null) {
        query = AUDIT_RECORDS_QUERY_CORE;
        if (filteredOnChangedItem) {
            query += AUDIT_RECORDS_QUERY_ITEMS_CHANGED;
        }
        if (filteredOnValueRefTargetNames) {
            query += AUDIT_RECORDS_QUERY_REF_VALUES;
        }
        if (!conditions.isEmpty()) {
            query += " where ";
        }
    }
    if (isCount) {
        query = AUDIT_RECORDS_QUERY_COUNT + query;
    }
    query += conditions.stream().collect(Collectors.joining(" and "));
    if (ordered) {
        query += AUDIT_RECORDS_ORDER_BY;
    }
    return query;
}
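The joining call in the example above does one job: it collapses the accumulated filter conditions into a single HQL fragment separated by " and ". A stripped-down sketch of that pattern, with made-up condition strings and a placeholder base query (not taken from the midPoint code):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class WhereClauseSketch {
    public static void main(String[] args) {
        // Hypothetical conditions; in the example above they are added only when the matching parameter is set
        List<String> conditions = Arrays.asList("aer.timestamp >= :from", "aer.eventType = :eventType");
        String query = "from AuditEventRecord aer"; // placeholder base query
        if (!conditions.isEmpty()) {
            query += " where " + conditions.stream().collect(Collectors.joining(" and "));
        }
        System.out.println(query);
        // prints: from AuditEventRecord aer where aer.timestamp >= :from and aer.eventType = :eventType
    }
}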
From source file:com.firewallid.networkanalysis.NetworkAnalysis.java
public void saveNAFileEdges(JavaPairRDD<String, Set<Tuple2<String, String>>> edges, String prefixFileName) {
    String docDelimiter = StringEscapeUtils.unescapeJava(firewallConf.get(DOC_DELIMITER));
    String naFolder = firewallConf.get(NETWORKANALYSIS_FOLDER);
    String nameDelimiter = firewallConf.get(NAME_DELIMITER);

    edges.mapToPair(edge -> new Tuple2<>(edge._1(),
            "id" + docDelimiter + "node1" + docDelimiter + "node2" + System.lineSeparator()
                    + StreamUtils.zipWithIndex(edge._2().parallelStream()).parallel()
                            .map(edgeItem -> edgeItem.getIndex() + docDelimiter + edgeItem.getValue()._1()
                                    + docDelimiter + edgeItem.getValue()._2())
                            .collect(Collectors.joining(System.lineSeparator()))))
            .foreach(edge -> FIFile.writeStringToHDFSFile(
                    FIFile.generateFullPath(naFolder,
                            prefixFileName + nameDelimiter + edge._1() + nameDelimiter + "edge.csv"),
                    edge._2()));
}
From source file:org.elasticsearch.backwards.IndexingIT.java
public void testSeqNoCheckpoints() throws Exception {
    Nodes nodes = buildNodeAndVersions();
    assumeFalse("new nodes is empty", nodes.getNewNodes().isEmpty());
    logger.info("cluster discovered: {}", nodes.toString());
    final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(Node::getNodeName)
            .collect(Collectors.toList());
    final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(","));
    Settings.Builder settings = Settings.builder()
            .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
            .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
            .put("index.routing.allocation.include._name", bwcNames);
    final boolean checkGlobalCheckpoints = nodes.getMaster().getVersion()
            .onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED);
    logger.info("master version is [{}], global checkpoints will be [{}]", nodes.getMaster().getVersion(),
            checkGlobalCheckpoints ? "checked" : "not be checked");
    if (checkGlobalCheckpoints) {
        settings.put(IndexSettings.INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL.getKey(), "100ms");
    }
    final String index = "test";
    createIndex(index, settings.build());
    try (RestClient newNodeClient = buildClient(restClientSettings(),
            nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) {
        int numDocs = 0;
        final int numberOfInitialDocs = 1 + randomInt(5);
        logger.info("indexing [{}] docs initially", numberOfInitialDocs);
        numDocs += indexDocs(index, 0, numberOfInitialDocs);
        assertSeqNoOnShards(nodes, checkGlobalCheckpoints, 0, newNodeClient);
        logger.info("allowing shards on all nodes");
        updateIndexSetting(index, Settings.builder().putNull("index.routing.allocation.include._name"));
        ensureGreen();
        assertOK(client().performRequest("POST", index + "/_refresh"));
        for (final String bwcName : bwcNamesList) {
            assertCount(index, "_only_nodes:" + bwcName, numDocs);
        }
        final int numberOfDocsAfterAllowingShardsOnAllNodes = 1 + randomInt(5);
        logger.info("indexing [{}] docs after allowing shards on all nodes",
                numberOfDocsAfterAllowingShardsOnAllNodes);
        numDocs += indexDocs(index, numDocs, numberOfDocsAfterAllowingShardsOnAllNodes);
        assertSeqNoOnShards(nodes, checkGlobalCheckpoints, 0, newNodeClient);
        Shard primary = buildShards(nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get();
        logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName());
        updateIndexSetting(index, Settings.builder().put("index.routing.allocation.exclude._name",
                primary.getNode().getNodeName()));
        ensureGreen();
        int numDocsOnNewPrimary = 0;
        final int numberOfDocsAfterMovingPrimary = 1 + randomInt(5);
        logger.info("indexing [{}] docs after moving primary", numberOfDocsAfterMovingPrimary);
        numDocsOnNewPrimary += indexDocs(index, numDocs, numberOfDocsAfterMovingPrimary);
        numDocs += numberOfDocsAfterMovingPrimary;
        assertSeqNoOnShards(nodes, checkGlobalCheckpoints, numDocsOnNewPrimary, newNodeClient);
        /*
         * Dropping the number of replicas to zero, and then increasing it to one triggers a recovery
         * thus exercising any BWC-logic in the recovery code.
         */
        logger.info("setting number of replicas to 0");
        updateIndexSetting(index, Settings.builder().put("index.number_of_replicas", 0));
        final int numberOfDocsAfterDroppingReplicas = 1 + randomInt(5);
        logger.info("indexing [{}] docs after setting number of replicas to 0", numberOfDocsAfterDroppingReplicas);
        numDocsOnNewPrimary += indexDocs(index, numDocs, numberOfDocsAfterDroppingReplicas);
        numDocs += numberOfDocsAfterDroppingReplicas;
        logger.info("setting number of replicas to 1");
        updateIndexSetting(index, Settings.builder().put("index.number_of_replicas", 1));
        ensureGreen();
        assertOK(client().performRequest("POST", index + "/_refresh"));
        // the number of documents on the primary and on the recovered replica should match the number of indexed documents
        assertCount(index, "_primary", numDocs);
        assertCount(index, "_replica", numDocs);
        assertSeqNoOnShards(nodes, checkGlobalCheckpoints, numDocsOnNewPrimary, newNodeClient);
    }
}
From source file:de.pixida.logtest.engine.Automaton.java
public Automaton(final IAutomatonDefinition aAutomatonDefinition, final Map<String, String> aParameters) {
    LOG.debug("Creating automaton with definition '{}' and parameters '{}'", aAutomatonDefinition, aParameters);
    Validate.notNull(aAutomatonDefinition);
    Validate.notNull(aParameters);
    this.automatonDefinition = aAutomatonDefinition;
    this.parameters = new AutomatonParameters(aParameters);
    try {
        this.loadAutomatonFromDefinition();
        if (this.description != null) {
            this.description = this.parameters.insertAllParameters(this.description);
        }
        LOG.debug("Automaton description: {}", this.description);
        this.checkAutomatonAndFindInitialNode();
        this.compileScripts();
    } catch (final InvalidAutomatonDefinitionException iade) {
        final String errorsWithoutStackTraces = ExceptionUtils.getThrowableList(iade).stream()
                .map(e -> e.getMessage()).collect(Collectors.joining("; "));
        LOG.error("Error while initializing automaton '{}': {}", this.automatonDefinition, errorsWithoutStackTraces);
        this.thrownException = iade;
    } catch (final RuntimeException re) {
        this.thrownException = re;
        throw re;
    }
}
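The joining usage above flattens a chain of exception messages into one "; "-separated string suitable for a single log line. A minimal sketch of the same idea using only the JDK; the hand-rolled cause-chain walk stands in for commons-lang's ExceptionUtils.getThrowableList and is illustrative only:

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

public class ExceptionMessageJoin {
    public static void main(String[] args) {
        Exception chained = new IllegalStateException("outer failure",
                new IllegalArgumentException("inner cause"));
        // Walk the cause chain (stand-in for ExceptionUtils.getThrowableList)
        List<Throwable> chain = new ArrayList<>();
        for (Throwable t = chained; t != null; t = t.getCause()) {
            chain.add(t);
        }
        String messages = chain.stream().map(Throwable::getMessage).collect(Collectors.joining("; "));
        System.out.println(messages); // prints: outer failure; inner cause
    }
}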
From source file:com.asakusafw.runtime.io.text.directio.AbstractTextStreamFormatTest.java
static byte[] serialize(String[][] fields) {
    return Arrays.stream(fields)
            .map(ss -> String.join(":", ss))
            .collect(Collectors.joining("\n"))
            .getBytes(StandardCharsets.UTF_8);
}
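As a usage example, assuming the method above is in scope, a call with a small hypothetical array yields the UTF-8 bytes of the rows joined by newlines, with the fields of each row joined by ":":

byte[] bytes = serialize(new String[][] { { "a", "b" }, { "c", "d" } });
// new String(bytes, StandardCharsets.UTF_8) equals "a:b\nc:d"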
From source file:com.ggvaidya.scinames.ui.BulkChangeEditorController.java
private void setupChangesTableView() {
    changesTableView.setEditable(true);
    changesTableView.getSelectionModel().setSelectionMode(SelectionMode.MULTIPLE);
    changesTableView.getColumns().clear();

    TableColumn<PotentialChange, ChangeType> colChangeType = new TableColumn<>("Type");
    colChangeType.setCellFactory(ComboBoxTableCell.forTableColumn(new ChangeTypeStringConverter(),
            ChangeType.ADDITION, ChangeType.DELETION, ChangeType.RENAME, ChangeType.LUMP, ChangeType.SPLIT,
            ChangeType.ERROR));
    colChangeType.setCellValueFactory(new PropertyValueFactory<>("type"));
    colChangeType.setPrefWidth(100.0);
    colChangeType.setEditable(true);
    changesTableView.getColumns().add(colChangeType);

    TableColumn<PotentialChange, ObservableSet<Name>> colChangeFrom = new TableColumn<>("From");
    colChangeFrom.setCellFactory(TextFieldTableCell.forTableColumn(new NameSetStringConverter()));
    colChangeFrom.setCellValueFactory(new PropertyValueFactory<>("from"));
    colChangeFrom.setPrefWidth(200.0);
    colChangeFrom.setEditable(true);
    changesTableView.getColumns().add(colChangeFrom);

    TableColumn<PotentialChange, ObservableSet<Name>> colChangeTo = new TableColumn<>("To");
    colChangeTo.setCellFactory(TextFieldTableCell.forTableColumn(new NameSetStringConverter()));
    colChangeTo.setCellValueFactory(new PropertyValueFactory<>("to"));
    colChangeTo.setPrefWidth(200.0);
    colChangeTo.setEditable(true);
    changesTableView.getColumns().add(colChangeTo);

    TableColumn<PotentialChange, String> colChangeDataset = new TableColumn<>("Dataset");
    colChangeDataset.setCellValueFactory(new PropertyValueFactory<>("dataset"));
    colChangeDataset.setPrefWidth(100.0);
    changesTableView.getColumns().add(colChangeDataset);

    ChangeFilter cf = project.getChangeFilter();
    TableColumn<PotentialChange, String> colFiltered = new TableColumn<>("Eliminated by filter?");
    colFiltered.setCellValueFactory(
            (TableColumn.CellDataFeatures<PotentialChange, String> features) -> new ReadOnlyStringWrapper(
                    cf.test(features.getValue()) ? "Allowed" : "Eliminated"));
    changesTableView.getColumns().add(colFiltered);

    TableColumn<PotentialChange, String> colNote = new TableColumn<>("Note");
    colNote.setCellFactory(TextFieldTableCell.forTableColumn());
    colNote.setCellValueFactory(new PropertyValueFactory<>("note"));
    colNote.setPrefWidth(100.0);
    colNote.setEditable(true);
    changesTableView.getColumns().add(colNote);

    TableColumn<PotentialChange, String> colProperties = new TableColumn<>("Properties");
    colProperties.setCellValueFactory(
            (TableColumn.CellDataFeatures<PotentialChange, String> features) -> new ReadOnlyStringWrapper(
                    features.getValue().getProperties().entrySet().stream()
                            .map(entry -> entry.getKey() + ": " + entry.getValue()).sorted()
                            .collect(Collectors.joining("; "))));
    changesTableView.getColumns().add(colProperties);

    TableColumn<PotentialChange, String> colCitations = new TableColumn<>("Citations");
    colCitations.setCellValueFactory(
            (TableColumn.CellDataFeatures<PotentialChange, String> features) -> new ReadOnlyStringWrapper(
                    features.getValue().getCitationStream().map(citation -> citation.getCitation()).sorted()
                            .collect(Collectors.joining("; "))));
    changesTableView.getColumns().add(colCitations);

    TableColumn<PotentialChange, String> colGenera = new TableColumn<>("Genera");
    colGenera.setCellValueFactory(
            (TableColumn.CellDataFeatures<PotentialChange, String> features) -> new ReadOnlyStringWrapper(
                    String.join(", ", features.getValue().getAllNames().stream().map(n -> n.getGenus())
                            .distinct().sorted().collect(Collectors.toList()))));
    changesTableView.getColumns().add(colGenera);

    TableColumn<PotentialChange, String> colSpecificEpithet = new TableColumn<>("Specific epithet");
    colSpecificEpithet.setCellValueFactory(
            (TableColumn.CellDataFeatures<PotentialChange, String> features) -> new ReadOnlyStringWrapper(
                    String.join(", ", features.getValue().getAllNames().stream().map(n -> n.getSpecificEpithet())
                            .filter(s -> s != null).distinct().sorted().collect(Collectors.toList()))));
    changesTableView.getColumns().add(colSpecificEpithet);

    // TODO: if we can get an ObservableList over tp.getAllChanges(), then this table
    // will update dynamically as changes are made. Won't that be something.
    // Yes, we want to getAllChanges() so we can see which ones are filtered out.
    changesTableView.setItems(foundChanges);
    changesTableView.getSortOrder().add(colChangeType);
}
From source file:kafka.benchmark.AdvertisingTopology.java
public static String listOfStringToString(List<String> list, String port) {
    return list.stream().map(item -> item + ":" + port).collect(Collectors.joining(","));
}
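As a usage example with hypothetical broker names and port, the helper above produces the comma-separated host:port list that Kafka client properties such as bootstrap.servers expect:

// Assumes the method above is in scope
String brokers = listOfStringToString(Arrays.asList("broker1", "broker2"), "9092");
// brokers is "broker1:9092,broker2:9092"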
From source file:com.gitpitch.services.OfflineService.java
private int processMarkdown(PitchParams pp, Path zipRoot, Optional<SlideshowModel> ssmo) {
    int status = STATUS_UNDEF;
    String consumed = null;
    Path mdOnlinePath = zipRoot.resolve(PITCHME_ONLINE_PATH);
    File mdOnlineFile = mdOnlinePath.toFile();
    if (mdOnlineFile.exists()) {
        GRSService grsService = grsManager.getService(grsManager.get(pp));
        MarkdownRenderer mrndr = MarkdownRenderer.build(pp, ssmo, grsService, diskService);
        MarkdownModel markdownModel = (MarkdownModel) markdownModelFactory.create(mrndr);
        try (Stream<String> stream = Files.lines(mdOnlinePath)) {
            consumed = stream.map(md -> {
                return markdownModel.offline(md);
            }).collect(Collectors.joining("\n"));
            Path mdOfflinePath = zipRoot.resolve(PITCHME_OFFLINE_PATH);
            Files.write(mdOfflinePath, consumed.getBytes());
            fetchOnlineAssets(pp, zipRoot);
            status = STATUS_OK;
        } catch (Exception mex) {
            log.warn("processMarkdown: ex={}", mex);
        }
    } else {
        log.warn("processMarkdown: mdOnline not found={}", mdOnlineFile);
    }
    log.debug("processMarkdown: returning status={}", status);
    return status;
}
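The joining call above implements a read-transform-rewrite idiom: stream the lines of a file, rewrite each line, and reassemble the document with "\n" as the delimiter before writing it back out. A standalone sketch of that idiom, with hypothetical file names and a trivial per-line transform standing in for markdownModel.offline(md):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class RewriteLines {
    public static void main(String[] args) throws IOException {
        Path in = Paths.get("PITCHME.md");          // hypothetical input file
        Path out = Paths.get("PITCHME-offline.md"); // hypothetical output file
        try (Stream<String> lines = Files.lines(in)) {
            // Transform each line (here: trim trailing whitespace) and rejoin with '\n'
            String rewritten = lines.map(String::trim).collect(Collectors.joining("\n"));
            Files.write(out, rewritten.getBytes(StandardCharsets.UTF_8));
        }
    }
}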
From source file:com.ikanow.aleph2.analytics.services.TestDeduplicationService.java
@Test
public void test_validateModule() {
    // Fails because no doc schema nor doc schema override, no lookup override
    {
        final DataBucketBean test_bucket = getDocBucket("/test/simple", null);
        final EnrichmentControlMetadataBean control = BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
                .with(EnrichmentControlMetadataBean::name, "test").done().get();
        final DeduplicationService test_module = new DeduplicationService();
        final IEnrichmentModuleContext enrich_context = Mockito.mock(IEnrichmentModuleContext.class);
        Mockito.when(enrich_context.getServiceContext()).thenReturn(_service_context);
        final Collection<BasicMessageBean> res = test_module.validateModule(enrich_context, test_bucket, control);
        // Check errors
        assertFalse(res.isEmpty());
        assertTrue(res.stream().allMatch(b -> !b.success()));
    }
    // No doc schema, no lookup override set
    {
        final DataBucketBean test_bucket = getDocBucket("/test/simple", null);
        final EnrichmentControlMetadataBean control = BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
                .with(EnrichmentControlMetadataBean::name, "test")
                .with(EnrichmentControlMetadataBean::config,
                        BeanTemplateUtils.toMap(BeanTemplateUtils.build(DedupConfigBean.class)
                                .with(DedupConfigBean::doc_schema_override,
                                        BeanTemplateUtils.build(DocumentSchemaBean.class)
                                                .with(DocumentSchemaBean::lookup_service_override, "").done().get())
                                .done().get()))
                .done().get();
        final DeduplicationService test_module = new DeduplicationService();
        final IEnrichmentModuleContext enrich_context = Mockito.mock(IEnrichmentModuleContext.class);
        Mockito.when(enrich_context.getServiceContext()).thenReturn(_service_context);
        final Collection<BasicMessageBean> res = test_module.validateModule(enrich_context, test_bucket, control);
        // Check errors
        assertFalse(res.isEmpty());
        assertTrue(res.stream().allMatch(b -> !b.success()));
    }
    // No doc schema, but lookup override set (should work)
    {
        final DataBucketBean test_bucket = getDocBucket("/test/simple", null);
        final EnrichmentControlMetadataBean control = BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
                .with(EnrichmentControlMetadataBean::name, "test")
                .with(EnrichmentControlMetadataBean::config,
                        BeanTemplateUtils.toMap(BeanTemplateUtils.build(DedupConfigBean.class)
                                .with(DedupConfigBean::doc_schema_override,
                                        BeanTemplateUtils.build(DocumentSchemaBean.class)
                                                .with(DocumentSchemaBean::lookup_service_override, "search_index_service")
                                                .done().get())
                                .done().get()))
                .done().get();
        final DeduplicationService test_module = new DeduplicationService();
        final IEnrichmentModuleContext enrich_context = Mockito.mock(IEnrichmentModuleContext.class);
        Mockito.when(enrich_context.getServiceContext()).thenReturn(_service_context);
        final Collection<BasicMessageBean> res = test_module.validateModule(enrich_context, test_bucket, control);
        // Check errors
        assertTrue("Errors = " + res.stream().map(b -> b.message()).collect(Collectors.joining(";")), res.isEmpty());
    }
    // No doc schema, but lookup override set ... but is invalid
    {
        final DataBucketBean test_bucket = getDocBucket("/test/simple", null);
        final EnrichmentControlMetadataBean control = BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
                .with(EnrichmentControlMetadataBean::name, "test")
                .with(EnrichmentControlMetadataBean::config,
                        BeanTemplateUtils.toMap(BeanTemplateUtils.build(DedupConfigBean.class)
                                .with(DedupConfigBean::doc_schema_override,
                                        BeanTemplateUtils.build(DocumentSchemaBean.class)
                                                .with(DocumentSchemaBean::lookup_service_override, "rabbit").done().get())
                                .done().get()))
                .done().get();
        final DeduplicationService test_module = new DeduplicationService();
        final IEnrichmentModuleContext enrich_context = Mockito.mock(IEnrichmentModuleContext.class);
        Mockito.when(enrich_context.getServiceContext()).thenReturn(_service_context);
        final Collection<BasicMessageBean> res = test_module.validateModule(enrich_context, test_bucket, control);
        // Check errors
        assertFalse(res.isEmpty());
        assertTrue(res.stream().allMatch(b -> !b.success()));
    }
    // Doc schema, no lookup override
    {
        final DataBucketBean test_bucket = getDocBucket("/test/simple",
                BeanTemplateUtils.build(DocumentSchemaBean.class)
                        .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.custom_update)
                        .done().get());
        final EnrichmentControlMetadataBean control = BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
                .with(EnrichmentControlMetadataBean::name, "test").done().get();
        final DeduplicationService test_module = new DeduplicationService();
        final IEnrichmentModuleContext enrich_context = Mockito.mock(IEnrichmentModuleContext.class);
        Mockito.when(enrich_context.getServiceContext()).thenReturn(_service_context);
        final Collection<BasicMessageBean> res = test_module.validateModule(enrich_context, test_bucket, control);
        // Check errors
        assertTrue("Errors = " + res.stream().map(b -> b.message()).collect(Collectors.joining(";")), res.isEmpty());
    }
    // Child module fails
    {
        final DataBucketBean test_bucket = addTimestampField("@timestamp",
                getDocBucket("/test/custom/1",
                        BeanTemplateUtils.build(DocumentSchemaBean.class)
                                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.custom)
                                .with(DocumentSchemaBean::custom_deduplication_configs,
                                        Arrays.asList(BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
                                                .with(EnrichmentControlMetadataBean::name, "custom_test")
                                                .with(EnrichmentControlMetadataBean::config,
                                                        new LinkedHashMap<>(ImmutableMap.<String, Object>of("fail", true)))
                                                .with(EnrichmentControlMetadataBean::entry_point,
                                                        TestDedupEnrichmentModule.class.getName())
                                                .done().get()))
                                .done().get()));
        final EnrichmentControlMetadataBean control = BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
                .with(EnrichmentControlMetadataBean::name, "test").done().get();
        final DeduplicationService test_module = new DeduplicationService();
        final IEnrichmentModuleContext enrich_context = Mockito.mock(IEnrichmentModuleContext.class);
        Mockito.when(enrich_context.getServiceContext()).thenReturn(_service_context);
        final Collection<BasicMessageBean> res = test_module.validateModule(enrich_context, test_bucket, control);
        // Check errors
        assertFalse(res.isEmpty());
        assertTrue(res.stream().allMatch(b -> !b.success()));
    }
    // Child module succeeds
    {
        final DataBucketBean test_bucket = addTimestampField("@timestamp",
                getDocBucket("/test/custom/1",
                        BeanTemplateUtils.build(DocumentSchemaBean.class)
                                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.custom)
                                .with(DocumentSchemaBean::custom_deduplication_configs,
                                        Arrays.asList(BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
                                                .with(EnrichmentControlMetadataBean::name, "custom_test")
                                                .with(EnrichmentControlMetadataBean::entry_point,
                                                        TestDedupEnrichmentModule.class.getName())
                                                .done().get()))
                                .done().get()));
        final EnrichmentControlMetadataBean control = BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
                .with(EnrichmentControlMetadataBean::name, "test").done().get();
        final DeduplicationService test_module = new DeduplicationService();
        final IEnrichmentModuleContext enrich_context = Mockito.mock(IEnrichmentModuleContext.class);
        Mockito.when(enrich_context.getServiceContext()).thenReturn(_service_context);
        final Collection<BasicMessageBean> res = test_module.validateModule(enrich_context, test_bucket, control);
        // Check errors
        assertTrue("Errors = " + res.stream().map(b -> b.message()).collect(Collectors.joining(";")), res.isEmpty());
    }
}
From source file:com.ikanow.aleph2.example.flume_harvester.utils.FlumeUtils.java
/** Creates the flume config
 * @param agent_name
 * @param bucket_config
 * @param context_signature
 * @param morphlines_config_path
 * @return
 */
public static String createFlumeConfig(final String agent_name, final DataBucketBean bucket,
        final FlumeBucketConfigBean bucket_config_in, final String context_signature,
        final Optional<String> morphlines_config_path, final boolean test_mode) {
    //TODO (ALEPH-10): security
    final FlumeBucketConfigBean bucket_config = createAutoFlumeConfig(bucket, bucket_config_in, test_mode);

    // Handle test mode changes (a set of user overrides)
    final Map<String, String> flume_config = Lambdas.get(() -> {
        if (test_mode) { // overwrite using test overrides
            return Stream
                    .concat(Optional.ofNullable(bucket_config.flume_config()).orElse(Collections.emptyMap())
                                    .entrySet().stream(),
                            Optional.ofNullable(bucket_config.flume_config_test_overrides())
                                    .orElse(Collections.emptyMap()).entrySet().stream())
                    .collect(() -> new HashMap<String, String>(), (acc, kv) -> {
                        final String val = kv.getValue() instanceof String ? (String) kv.getValue() : null;
                        acc.put(kv.getKey(), val);
                    }, (acc1, acc2) -> acc1.putAll(acc2));
        } else
            return bucket_config.flume_config();
    });

    final String sub_prefix = Optional.ofNullable(bucket_config.substitution_prefix()).orElse("$$$$");
    final String agent_prefix = agent_name + ".";
    final boolean sink_present = Optional.ofNullable(flume_config).map(m -> m.containsKey("sinks")).orElse(false);
    final String[] channels = Optional.ofNullable(flume_config).map(m -> m.get("channels"))
            .map(c -> c.split("\\s+")).orElse(new String[0]);

    //TODO (ALEPH-10 add to test case)
    if ((channels.length > 1) && !sink_present) {
        throw new RuntimeException("If have multiple channels then cannot use implicit sink");
    }

    //(not needed currently)
    // final Set<String> sinks = Optional.ofNullable(flume_config)
    //         .map(m -> (String) m.get("sinks"))
    //         .map(s -> Arrays.stream(s.split("\\s+")).collect(Collectors.toSet()))
    //         .orElse(Collections.emptySet());

    return Optional.of(Optional.ofNullable(bucket_config.flume_config_str()))
            .map(opt -> opt.map(ss -> ss + "\n").orElse(""))
            .map(s -> s + Optional.ofNullable(flume_config)
                    .map(cfg -> cfg.entrySet().stream()
                            .filter(kv -> null != kv.getValue()) // (fields nulled out by the test override)
                            .map(kv -> agent_prefix + decodeKey(kv.getKey()) + "="
                                    + decodeValue(kv.getValue(), sub_prefix, morphlines_config_path, context_signature))
                            .collect(Collectors.joining("\n")))
                    .filter(ss -> !ss.isEmpty()).orElse(""))
            .map(s -> sink_present ? s
                    : (s + "\n" + agent_prefix + "sinks=aleph2_sink" + "\n"
                            + agent_prefix + "sinks.aleph2_sink." + "channel=" + channels[0] + "\n"
                            + agent_prefix + "sinks.aleph2_sink."
                            + "type=com.ikanow.aleph2.example.flume_harvester.services.FlumeHarvesterSink" + "\n"
                            + agent_prefix + "sinks.aleph2_sink." + "context_signature="
                            + encodeSignature(context_signature) + "\n"))
            .get();
}