List of usage examples for java.util.stream Collectors joining
public static Collector<CharSequence, ?, String> joining(CharSequence delimiter)
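Before the real-world usages below, a minimal self-contained sketch (not taken from any of the listed sources) of what this overload does: the stream elements are concatenated in encounter order, separated by the given delimiter.

import java.util.stream.Collectors;
import java.util.stream.Stream;

public class JoiningDemo {
    public static void main(String[] args) {
        // Join three elements with ", " as the delimiter.
        String csv = Stream.of("a", "b", "c").collect(Collectors.joining(", "));
        System.out.println(csv); // a, b, c
    }
}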
From source file:com.twosigma.beakerx.inspect.Inspect.java
public String parseMethodsInfo(List<MethodInspect> methods, String className) {
    if (methods == null) {
        return "";
    }
    String parsedMethods = methods.stream()
            .map(m -> COLOR_RED + "Signature: " + COLOR_RESET
                    + className + (className.equals("") ? "" : ".")
                    + m.getMethodName() + "(" + m.getSignature() + ")" + "\n"
                    + COLOR_RED + "JavaDoc: "
                    + (m.getJavadoc().equals("") ? "<no JavaDoc>" : COLOR_RESET + m.getJavadoc()))
            .collect(Collectors.joining("\n\n"));
    return parsedMethods;
}
From source file:com.epam.ta.reportportal.core.widget.content.GeneralFilterStrategy.java
/**
 * {@link TestItem} doesn't have a
 * {@link com.epam.ta.reportportal.database.entity.Project} field; in this
 * case we should find all launches by project and mode (not Debug mode) and
 * use them for searching test items using the "In" filter.
 *
 * @param projectName
 * @return FilterCondition
 */
private FilterCondition getConditionForTestItem(String projectName) {
    Filter filter = new Filter(Launch.class, Condition.EQUALS, false, projectName, Launch.PROJECT);
    filter.addCondition(modeCondition);
    List<Launch> launches = launchRepository.findIdsByFilter(filter);
    final String value = launches.stream().map(Launch::getId)
            .collect(Collectors.joining(Condition.VALUES_SEPARATOR));
    return new FilterCondition(Condition.IN, false, value, TestItem.LAUNCH_CRITERIA);
}
From source file:com.ikanow.aleph2.enrichment.utils.services.SimpleRegexFilterService.java
/**
 * Utility to build a regex out of a list of patterns.
 * @param config
 * @return
 */
protected static Pattern buildRegex(final RegexConfig config) {
    final String regex = config.regexes().stream()
            .map(s -> "(?:" + s + ")")
            .collect(Collectors.joining("|"));
    return Pattern.compile(regex, parseFlags(config.flags()));
}
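As an aside (not from the source above), the alternation-building idiom is easy to check in isolation; a minimal sketch with the hypothetical patterns "foo" and "ba+r":

import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class RegexJoinDemo {
    public static void main(String[] args) {
        // Wrap each pattern in a non-capturing group, then join with "|".
        String regex = Stream.of("foo", "ba+r")
                .map(s -> "(?:" + s + ")")
                .collect(Collectors.joining("|"));
        System.out.println(regex);                          // (?:foo)|(?:ba+r)
        System.out.println(Pattern.matches(regex, "baar")); // true
    }
}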
From source file:net.ljcomputing.ecsr.security.service.impl.JwtTokenServiceImpl.java
/**
 * @see net.ljcomputing.ecsr.security.service.impl.JwtTokenService
 *      #create(org.springframework.security.core.Authentication)
 */
@Override
public String create(final Authentication authentication) {
    if (authentication == null) {
        LOGGER.error("NO TOKEN");
        throw new BadCredentialsException("No authentication provided.");
    }

    final String authorities = authentication.getAuthorities().stream() // NOPMD
            .map(authority -> authority.getAuthority())
            .collect(Collectors.joining(","));

    final String result = Jwts.builder() // NOPMD
            .setSubject(authentication.getName()).setIssuer(tokenIssuer).setIssuedAt(now())
            .setExpiration(expirationDate()).claim(WebSecurityConfiguration.AUTHORITIES_KEY, authorities)
            .signWith(SignatureAlgorithm.HS512, tokenSigningKey).compact();

    if (!isValid(result)) {
        throw new BadCredentialsException("Token is invalid");
    }

    return result;
}
From source file:com.ikanow.aleph2.analytics.spark.services.TestEnrichmentPipelineService.java
@Test
public void test_groupingBehavior() {
    // (quickly test the whole thing works!)
    {
        JavaRDD<String> test = _spark.parallelize(Arrays.asList("a", "b", "c"));
        assertEquals(3L, test.map(s -> s + "X").count());
    }
    // (sample group)
    {
        JavaRDD<Tuple2<String, String>> test = _spark.parallelize(
                Arrays.asList(Tuples._2T("a", "resa1"), Tuples._2T("b", "resb1"), Tuples._2T("a", "resa2")));
        assertEquals(2L, test.groupBy(t2 -> t2._1()).map(key_lt2 -> {
            System.out.println("key=" + key_lt2._1() + ".. vals = "
                    + Optionals.streamOf(key_lt2._2(), false)
                            .map(t2 -> t2.toString()).collect(Collectors.joining(";")));
            return null;
        }).count());
    }
}
From source file:org.mascherl.example.page.MailComposePage.java
@GET
@Path("/mail/compose/{mailUuid}")
public Observable<MascherlPage> compose(@PathParam("mailUuid") String mailUuid) {
    User localUser = MascherlSession.getInstance().get("user", User.class);

    Observable<List<MailAddressUsage>> sendToAddressesObservable = composeMailService
            .getLastSendToAddressesAsync(localUser, RECEIVER_HINT_MAX_ADDRESSES).toList()
            .timeout(500, TimeUnit.MILLISECONDS, Observable.just(Collections.emptyList()))
            .onErrorReturn((throwable) -> Collections.emptyList());
    Observable<List<MailAddressUsage>> receivedAddressesObservable = composeMailServiceAsync
            .getLastReceivedFromAddresses(localUser, RECEIVER_HINT_MAX_ADDRESSES)
            .timeout(500, TimeUnit.MILLISECONDS, Observable.just(Collections.emptyList()))
            .onErrorReturn((throwable) -> Collections.emptyList());

    return sendToAddressesObservable.zipWith(receivedAddressesObservable, (sendToList, receivedFromList) -> {
        List<MailAddressUsage> addresses = new ArrayList<>(RECEIVER_HINT_MAX_ADDRESSES * 2);
        if (receivedFromList != null) {
            addresses.addAll(receivedFromList);
        }
        if (sendToList != null) {
            addresses.addAll(sendToList);
        }
        return addresses.stream().distinct()
                .sorted((u1, u2) -> u2.getDateTime().compareTo(u1.getDateTime()))
                .limit(RECEIVER_HINT_MAX_ADDRESSES).collect(Collectors.toList());
    }).zipWith(composeMailServiceAsync.openDraft(mailUuid, localUser),
            (List<MailAddressUsage> receiverHintList, Mail mail) -> Mascherl
                    .page("/templates/mail/mailCompose.html")
                    .pageTitle("Compose - WebMail powered by Mascherl")
                    .container("userInfo", (model) -> model.put("user", localUser))
                    .container("pageContent", (model) -> {
                        if (mail != null) {
                            model.put("mail", convertToPageModelForEdit(mail));
                        }
                        String receiverHint = receiverHintList.stream()
                                .map((usage) -> usage.getMailAddress().getAddress())
                                .collect(Collectors.joining(", "));
                        if (!receiverHint.isEmpty()) {
                            model.put("receiverHint", receiverHint);
                        }
                    }))
            .onErrorReturn((throwable) -> {
                if (throwable instanceof IllegalStateException) {
                    return Mascherl.deferredPage(() -> mailDetailPage.mailDetail(mailUuid)
                            .replaceUrl(
                                    UriBuilder.fromMethod(MailDetailPage.class, "mailDetail").build(mailUuid))
                            .pageGroup("MailDetailPage"));
                }
                throw (RuntimeException) throwable;
            });
}
From source file:org.elasticsearch.backwards.IndexingIT.java
public void testIndexVersionPropagation() throws Exception {
    Nodes nodes = buildNodeAndVersions();
    assumeFalse("new nodes is empty", nodes.getNewNodes().isEmpty());
    logger.info("cluster discovered: {}", nodes.toString());
    final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(Node::getNodeName)
            .collect(Collectors.toList());
    final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(","));
    Settings.Builder settings = Settings.builder()
            .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
            .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
            .put("index.routing.allocation.include._name", bwcNames);
    final String index = "test";
    final int minUpdates = 5;
    final int maxUpdates = 10;
    createIndex(index, settings.build());
    try (RestClient newNodeClient = buildClient(restClientSettings(),
            nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) {
        int nUpdates = randomIntBetween(minUpdates, maxUpdates);
        logger.info("indexing docs with [{}] concurrent updates initially", nUpdates);
        final int finalVersionForDoc1 = indexDocWithConcurrentUpdates(index, 1, nUpdates);
        logger.info("allowing shards on all nodes");
        updateIndexSetting(index, Settings.builder().putNull("index.routing.allocation.include._name"));
        ensureGreen();
        assertOK(client().performRequest("POST", index + "/_refresh"));
        List<Shard> shards = buildShards(nodes, newNodeClient);
        for (Shard shard : shards) {
            assertVersion(index, 1, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc1);
            assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 1);
        }

        nUpdates = randomIntBetween(minUpdates, maxUpdates);
        logger.info("indexing docs with [{}] concurrent updates after allowing shards on all nodes", nUpdates);
        final int finalVersionForDoc2 = indexDocWithConcurrentUpdates(index, 2, nUpdates);
        assertOK(client().performRequest("POST", index + "/_refresh"));
        shards = buildShards(nodes, newNodeClient);
        for (Shard shard : shards) {
            assertVersion(index, 2, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc2);
            assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 2);
        }

        Shard primary = buildShards(nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get();
        logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName());
        updateIndexSetting(index, Settings.builder().put("index.routing.allocation.exclude._name",
                primary.getNode().getNodeName()));
        ensureGreen();
        nUpdates = randomIntBetween(minUpdates, maxUpdates);
        logger.info("indexing docs with [{}] concurrent updates after moving primary", nUpdates);
        final int finalVersionForDoc3 = indexDocWithConcurrentUpdates(index, 3, nUpdates);
        assertOK(client().performRequest("POST", index + "/_refresh"));
        shards = buildShards(nodes, newNodeClient);
        for (Shard shard : shards) {
            assertVersion(index, 3, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc3);
            assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 3);
        }

        logger.info("setting number of replicas to 0");
        updateIndexSetting(index, Settings.builder().put("index.number_of_replicas", 0));
        ensureGreen();
        nUpdates = randomIntBetween(minUpdates, maxUpdates);
        logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 0", nUpdates);
        final int finalVersionForDoc4 = indexDocWithConcurrentUpdates(index, 4, nUpdates);
        assertOK(client().performRequest("POST", index + "/_refresh"));
        shards = buildShards(nodes, newNodeClient);
        for (Shard shard : shards) {
            assertVersion(index, 4, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc4);
            assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 4);
        }

        logger.info("setting number of replicas to 1");
        updateIndexSetting(index, Settings.builder().put("index.number_of_replicas", 1));
        ensureGreen();
        nUpdates = randomIntBetween(minUpdates, maxUpdates);
        logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 1", nUpdates);
        final int finalVersionForDoc5 = indexDocWithConcurrentUpdates(index, 5, nUpdates);
        assertOK(client().performRequest("POST", index + "/_refresh"));
        shards = buildShards(nodes, newNodeClient);
        for (Shard shard : shards) {
            assertVersion(index, 5, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc5);
            assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 5);
        }

        // the number of documents on the primary and on the recovered replica should match the number of indexed documents
        assertCount(index, "_primary", 5);
        assertCount(index, "_replica", 5);
    }
}
From source file:eu.stamp_project.automaticbuilder.MavenAutomaticBuilder.java
@Override
public void runPit(String pathToRootOfProject, CtType<?>... testClasses) {
    try {
        org.apache.commons.io.FileUtils.deleteDirectory(new File(pathToRootOfProject + "/target/pit-reports"));
    } catch (Exception ignored) {
    }
    try {
        String[] phases = new String[] {
                CMD_PIT_MUTATION_COVERAGE + ":" + PitMutantScoreSelector.pitVersion + ":"
                        + GOAL_PIT_MUTATION_COVERAGE, //
                OPT_WITH_HISTORY, //
                OPT_TARGET_CLASSES + configuration.getProperty("filter"), //
                OPT_VALUE_REPORT_DIR, //
                OPT_VALUE_FORMAT, //
                OPT_VALUE_TIMEOUT, //
                OPT_VALUE_MEMORY, //
                OPT_TARGET_TESTS + Arrays.stream(testClasses).map(DSpotUtils::ctTypeToFullQualifiedName)
                        .collect(Collectors.joining(",")), //
                OPT_ADDITIONAL_CP_ELEMENTS + "target/dspot/dependencies/"
                        + (configuration.getProperty(PROPERTY_ADDITIONAL_CP_ELEMENTS) != null
                                ? "," + configuration.getProperty(PROPERTY_ADDITIONAL_CP_ELEMENTS)
                                : ""), //
                PitMutantScoreSelector.descartesMode ? OPT_MUTATION_ENGINE_DESCARTES : OPT_MUTATION_ENGINE_DEFAULT,
                PitMutantScoreSelector.descartesMode ? "" : OPT_MUTATORS + VALUE_MUTATORS_ALL, //
                configuration.getProperty(PROPERTY_EXCLUDED_CLASSES) != null
                        ? OPT_EXCLUDED_CLASSES + configuration.getProperty(PROPERTY_EXCLUDED_CLASSES)
                        : "" //
        };
        if (this.runGoals(pathToRootOfProject, phases) != 0) {
            throw new RuntimeException(
                    "Maven build failed! Enable verbose mode for more information (--verbose)");
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
From source file:com.marklogic.entityservices.examples.ExamplesBase.java
public void importRDF(Path referenceDataDir, String collection) {
    logger.info("RDF Load Job started");
    WriteHostBatcher batcher = moveMgr.newWriteHostBatcher().withBatchSize(10).withThreadCount(1)
            .withTransform(new ServerTransform("turtle-to-xml"))
            .onBatchSuccess((client, batch) -> logger.info("Loaded rdf data batch"))
            .onBatchFailure((client, batch, throwable) -> {
                logger.error("FAILURE on batch:" + batch.toString() + "\n", throwable);
                System.err.println(throwable.getMessage());
                System.err.println(Arrays.stream(batch.getItems()).map(item -> item.getTargetUri())
                        .collect(Collectors.joining("\n")));
                // throwable.printStackTrace();
            });
    ticket = moveMgr.startJob(batcher);
    importOrDescend(referenceDataDir, batcher, collection, Format.TEXT);
    batcher.flush();
}
From source file:com.ikanow.aleph2.data_import_manager.analytics.utils.TestAnalyticTriggerCrudUtils.java
@Test
public void test_storeOrUpdateTriggerStage_relativeCheckTime() throws InterruptedException {
    assertEquals(0, _test_crud.countObjects().join().intValue());

    // Same as start to test_storeOrUpdateTriggerStage_updateActivation, except check
    // that the next check isn't scheduled immediately
    final DataBucketBean bucket = BeanTemplateUtils.clone(buildBucket("/test/store/trigger", true))
            .with(DataBucketBean::poll_frequency, "2am tomorrow").done();

    // Save a bucket
    {
        final Stream<AnalyticTriggerStateBean> test_stream = AnalyticTriggerBeanUtils
                .generateTriggerStateStream(bucket, false, Optional.empty());
        final List<AnalyticTriggerStateBean> test_list = test_stream.collect(Collectors.toList());

        System.out.println("Resources = \n" + test_list.stream()
                .map(t -> BeanTemplateUtils.toJson(t).toString()).collect(Collectors.joining("\n")));

        assertEquals(8L, test_list.size()); // (8 not 7 cos haven't dedup'd yet)

        // 4 internal dependencies
        assertEquals(4L, test_list.stream().filter(t -> null != t.job_name()).count());
        // 4 external dependencies
        assertEquals(4L, test_list.stream().filter(t -> null == t.job_name()).count());

        final Map<Tuple2<String, String>, List<AnalyticTriggerStateBean>> grouped_triggers = test_list.stream()
                .collect(Collectors.groupingBy(t -> Tuples._2T(t.bucket_name(), null)));

        AnalyticTriggerCrudUtils.storeOrUpdateTriggerStage(bucket, _test_crud, grouped_triggers).join();

        assertEquals(7L, _test_crud.countObjects().join().intValue());

        // Only the internal triggers are scheduled for an immediate check
        assertEquals(4L, _test_crud
                .countObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class)
                        .rangeBelow(AnalyticTriggerStateBean::next_check, new Date(), false))
                .join().intValue());
    }
}