Example usage for java.util.stream Collectors joining

Introduction

This page collects usage examples for java.util.stream.Collectors.joining, drawn from open-source projects.

Prototype

public static Collector<CharSequence, ?, String> joining(CharSequence delimiter) 

Document

Returns a Collector that concatenates the input elements, separated by the specified delimiter, in encounter order.
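
For quick reference, below is a minimal, self-contained sketch of joining(delimiter) alongside its sibling overloads joining() and joining(delimiter, prefix, suffix):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class JoiningExamples {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("alpha", "beta", "gamma");

        // joining(delimiter): concatenates elements, separated by the delimiter, in encounter order
        String withDelimiter = words.stream().collect(Collectors.joining(", "));
        System.out.println(withDelimiter); // alpha, beta, gamma

        // joining(): plain concatenation, no delimiter
        String plain = words.stream().collect(Collectors.joining());
        System.out.println(plain); // alphabetagamma

        // joining(delimiter, prefix, suffix): wraps the joined result
        String wrapped = words.stream().collect(Collectors.joining(", ", "[", "]"));
        System.out.println(wrapped); // [alpha, beta, gamma]
    }
}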

Usage

From source file:com.thinkbiganalytics.jobrepo.rest.controller.FeedsRestController.java

private Collection<AlertSummaryGrouped> getAlerts(final String feedName, final String feedId) {
    return metadataAccess.read(() -> {

        String derivedFeedId = feedId;
        //get necessary feed info
        if (StringUtils.isBlank(feedId) && StringUtils.isNotBlank(feedName)) {
            //get the feedId for this feed name
            OpsManagerFeed feed = opsFeedManagerFeedProvider.findByName(feedName);
            if (feed != null) {
                derivedFeedId = feed.getId().toString();
            }
        }

        if (StringUtils.isBlank(derivedFeedId)) {
            return Collections.emptyList();
        }

        List<? extends ServiceLevelAgreementDescription> slas = serviceLevelAgreementDescriptionProvider
                .findForFeed(opsFeedManagerFeedProvider.resolveId(derivedFeedId));
        List<String> slaIds = new ArrayList<>();
        if (slas != null && !slas.isEmpty()) {
            slaIds = slas.stream().map(sla -> sla.getSlaId().toString()).collect(Collectors.toList());
        }
        List<String> ids = new ArrayList<>();
        ids.addAll(slaIds);
        ids.add(derivedFeedId);
        String filter = ids.stream().collect(Collectors.joining("||"));

        List<AlertSummary> alerts = new ArrayList<>();
        AlertCriteria criteria = alertProvider.criteria().state(Alert.State.UNHANDLED).orFilter(filter);
        alertProvider.getAlertsSummary(criteria).forEachRemaining(alerts::add);

        return alertsModel.groupAlertSummaries(alerts);
    });
}

From source file:io.soabase.halva.processor.caseclass.Templates.java

void addApplyBuilder(CaseClassSpec spec, TypeSpec.Builder builder, ClassName className,
        Optional<List<TypeVariableName>> typeVariableNames) {
    TypeName localCaseClassName = getLocalCaseClassName(className, typeVariableNames);

    List<ParameterSpec> parameters = spec.getItems().stream()
            .map(item -> ParameterSpec
                    .builder(environment.getGeneratedManager().toTypeName(item.getType()), item.getName())
                    .build())
            .collect(Collectors.toList());

    String arguments = spec.getItems().stream().map(CaseClassItem::getName).collect(Collectors.joining(", "));
    CodeBlock.Builder codeBlockBuilder = CodeBlock.builder().addStatement("return new $L$L($L)",
            className.simpleName(), getDuck(typeVariableNames), arguments);

    MethodSpec.Builder methodSpecBuilder = MethodSpec.methodBuilder(className.simpleName())
            .returns(localCaseClassName).addParameters(parameters).addCode(codeBlockBuilder.build())
            .addModifiers(Modifier.PUBLIC, Modifier.STATIC);
    if (typeVariableNames.isPresent()) {
        methodSpecBuilder.addTypeVariables(typeVariableNames.get());
    }
    builder.addMethod(methodSpecBuilder.build());
}

From source file:com.hortonworks.streamline.streams.actions.storm.topology.StormTopologyActionsImpl.java

@Override
public void deploy(TopologyLayout topology, String mavenArtifacts, TopologyActionContext ctx, String asUser)
        throws Exception {
    ctx.setCurrentAction("Adding artifacts to jar");
    Path jarToDeploy = addArtifactsToJar(getArtifactsLocation(topology));
    ctx.setCurrentAction("Creating Storm topology YAML file");
    String fileName = createYamlFile(topology);
    ctx.setCurrentAction("Deploying topology via 'storm jar' command");
    List<String> commands = new ArrayList<String>();
    commands.add(stormCliPath);
    commands.add("jar");
    commands.add(jarToDeploy.toString());
    commands.addAll(getExtraJarsArg(topology));
    commands.addAll(getMavenArtifactsRelatedArgs(mavenArtifacts));
    commands.addAll(getNimbusConf());
    commands.addAll(getSecuredClusterConf(asUser));
    commands.add("org.apache.storm.flux.Flux");
    commands.add("--remote");
    commands.add(fileName);
    LOG.info("Deploying Application {}", topology.getName());
    LOG.info(String.join(" ", commands));
    ShellProcessResult shellProcessResult = executeShellProcess(commands);
    int exitValue = shellProcessResult.exitValue;
    if (exitValue != 0) {
        LOG.error("Topology deploy command failed - exit code: {} / output: {}", exitValue,
                shellProcessResult.stdout);
        String[] lines = shellProcessResult.stdout.split("\\n");
        String errors = Arrays.stream(lines).filter(line -> line.startsWith("Exception"))
                .collect(Collectors.joining(", "));
        Pattern pattern = Pattern.compile("Topology with name `(.*)` already exists on cluster");
        Matcher matcher = pattern.matcher(errors);
        if (matcher.find()) {
            throw new TopologyAlreadyExistsOnCluster(matcher.group(1));
        } else {
            throw new Exception(
                    "Topology could not be deployed successfully: storm deploy command failed with " + errors);
        }
    }
}
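
The error-summarizing step above is a common pattern: split process output into lines, keep only the interesting ones, and join them into a single diagnostic string. Here is that pattern in isolation, as a minimal sketch with invented sample output standing in for shellProcessResult.stdout:

import java.util.Arrays;
import java.util.stream.Collectors;

public class ErrorSummary {
    public static void main(String[] args) {
        // Hypothetical process output for illustration only
        String stdout = "Parsing topology...\n"
                + "Exception in thread \"main\" java.lang.IllegalArgumentException\n"
                + "Submitting topology...\n"
                + "Exception: Topology with name `demo` already exists on cluster";

        // Keep only lines that start with "Exception" and join them into one string
        String errors = Arrays.stream(stdout.split("\\n"))
                .filter(line -> line.startsWith("Exception"))
                .collect(Collectors.joining(", "));

        System.out.println(errors);
    }
}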

From source file:com.ikanow.aleph2.shared.crud.mongodb.services.MongoDbCrudService.java

@Override
public CompletableFuture<Boolean> optimizeQuery(final List<String> ordered_field_list) {

    // Mongo appears to have a ~100 char limit on the index name, Fongo does not, so add a manual check
    // so we don't get the situation where the tests work but it fails operationally

    String approx_index_name = ordered_field_list.stream().collect(Collectors.joining("."));
    if (approx_index_name.length() > 100) {
        throw new MongoException(ErrorUtils.get(ErrorUtils.MONGODB_INDEX_TOO_LONG, approx_index_name));
    }

    return CompletableFuture.supplyAsync(() -> {
        final BasicDBObject index_keys = new BasicDBObject(ordered_field_list.stream()
                .collect(Collectors.toMap(f -> f, f -> 1, (v1, v2) -> 1, LinkedHashMap::new)));

        _state.orig_coll.createIndex(index_keys, new BasicDBObject("background", true));

        return true;
    });
}
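
When the stream does nothing but join (no mapping or filtering), String.join is a simpler equivalent; a one-line sketch:

import java.util.Arrays;
import java.util.List;

public class JoinShortcut {
    public static void main(String[] args) {
        List<String> fields = Arrays.asList("meta", "source", "key");
        // Equivalent to fields.stream().collect(Collectors.joining("."))
        String indexName = String.join(".", fields);
        System.out.println(indexName); // meta.source.key
    }
}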

From source file:com.haulmont.cuba.gui.components.filter.edit.FilterEditor.java

protected void checkGlobalDefaultAndCloseEditor() {
    List<FilterEntity> otherDefaultFilters = dataManager.loadList(LoadContext.create(FilterEntity.class)
            .setQuery(LoadContext
                    .createQuery("select f from sec$Filter f where f.globalDefault = true and "
                            + "f.componentId = :componentId and " + "f.id <> :currentId ")
                    .setParameter("componentId", filterEntity.getComponentId())
                    .setParameter("currentId", filterEntity.getId())));

    if (!otherDefaultFilters.isEmpty()) {
        String otherFilterNamesStr = otherDefaultFilters.stream().map(FilterEntity::getName)
                .collect(Collectors.joining(", "));
        showOptionDialog(getMessage("filter.editor.anotherGlobalDefaultFilterFound.dialogTitle"),
                formatMessage("filter.editor.anotherGlobalDefaultFilterFound.dialogMessage",
                        otherFilterNamesStr),
                MessageType.WARNING,
                new Action[] { new DialogAction(DialogAction.Type.YES, Action.Status.PRIMARY).withHandler(e -> {
                    otherDefaultFilters
                            .forEach(otherDefaultFilter -> otherDefaultFilter.setGlobalDefault(false));
                    modifiedGlobalDefaultFilters = dataManager.commit(new CommitContext(otherDefaultFilters));
                    close(COMMIT_ACTION_ID, true);
                }), new DialogAction(DialogAction.Type.NO, Action.Status.NORMAL).withHandler(e -> {
                    filterEntity.setGlobalDefault(false);
                    close(COMMIT_ACTION_ID, true);
                }), });
    } else {
        close(COMMIT_ACTION_ID, true);
    }
}

From source file:com.netflix.spinnaker.clouddriver.titus.client.RegionScopedTitusClient.java

private Map<String, List<com.netflix.titus.grpc.protogen.Task>> getTasks(List<String> jobIds,
        boolean includeDoneJobs) {
    TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder();
    if (!jobIds.isEmpty()) {
        taskQueryBuilder.putFilteringCriteria("jobIds", jobIds.stream().collect(Collectors.joining(",")));
    }
    if (titusRegion.getFeatureFlags().contains("jobIds")) {
        taskQueryBuilder.putFilteringCriteria("attributes", "source:spinnaker");
    }
    String filterByStates = "Launched,StartInitiated,Started";
    if (includeDoneJobs) {
        filterByStates = filterByStates + ",KillInitiated,Finished";
    }
    taskQueryBuilder.putFilteringCriteria("taskStates", filterByStates);

    List<com.netflix.titus.grpc.protogen.Task> tasks = getTasksWithFilter(taskQueryBuilder);
    return tasks.stream().collect(Collectors.groupingBy(com.netflix.titus.grpc.protogen.Task::getJobId));
}

From source file:com.esri.geoportal.commons.agp.client.AgpClient.java

/**
 * Updates an item.
 * @param owner user name
 * @param folderId folder id (optional)
 * @param itemId item id
 * @param title title
 * @param description description
 * @param url URL
 * @param thumbnailUrl thumbnail URL
 * @param itemType item type (must be a URL type)
 * @param extent extent
 * @param typeKeywords type keywords
 * @param tags tags
 * @param token token
 * @return update item response
 * @throws URISyntaxException if invalid URL
 * @throws IOException if operation fails
 */
public ItemResponse updateItem(String owner, String folderId, String itemId, String title, String description,
        URL url, URL thumbnailUrl, ItemType itemType, Double[] extent, String[] typeKeywords, String[] tags,
        String token) throws IOException, URISyntaxException {
    URIBuilder builder = new URIBuilder(updateItemUri(owner, StringUtils.trimToNull(folderId), itemId));

    HttpPost req = new HttpPost(builder.build());
    HashMap<String, String> params = new HashMap<>();
    params.put("f", "json");
    params.put("title", title);
    params.put("description", description);
    params.put("type", itemType.getTypeName());
    params.put("url", url.toExternalForm());
    if (thumbnailUrl != null) {
        params.put("thumbnailurl", thumbnailUrl.toExternalForm());
    }
    if (extent != null && extent.length == 4) {
        params.put("extent",
                Arrays.asList(extent).stream().map(Object::toString).collect(Collectors.joining(",")));
    }
    if (typeKeywords != null) {
        params.put("typeKeywords", Arrays.asList(typeKeywords).stream().collect(Collectors.joining(",")));
    }
    if (tags != null) {
        params.put("tags", Arrays.asList(tags).stream().collect(Collectors.joining(",")));
    }
    params.put("token", token);

    req.setEntity(createEntity(params));

    return execute(req, ItemResponse.class);
}
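
Two small simplifications are possible when joining arrays, sketched below under the same assumptions as the method above: a String[] can be passed straight to String.join, and an object array can be streamed with Arrays.stream instead of being wrapped in a list first.

import java.util.Arrays;
import java.util.stream.Collectors;

public class ArrayJoining {
    public static void main(String[] args) {
        String[] tags = {"basemap", "imagery"};
        // String[] satisfies the CharSequence... varargs, so no stream is needed
        System.out.println(String.join(",", tags)); // basemap,imagery

        Double[] extent = {-180.0, -90.0, 180.0, 90.0};
        // Arrays.stream avoids the Arrays.asList(...).stream() detour
        String extentParam = Arrays.stream(extent)
                .map(Object::toString)
                .collect(Collectors.joining(","));
        System.out.println(extentParam); // -180.0,-90.0,180.0,90.0
    }
}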

From source file:com.ggvaidya.scinames.complexquery.ComplexQueryViewController.java

public void updateTableWithChanges(Project project, Set<Change> changesToDisplay, List<Dataset> datasets) {
    List<Change> changes = changesToDisplay.stream().sorted((a, b) -> a.getDataset().compareTo(b.getDataset()))
            .collect(Collectors.toList());

    NameClusterManager ncm = project.getNameClusterManager();

    // And add tablecolumns for the rest.
    dataTableView.getColumns().clear();
    dataTableView.getColumns().addAll(createTableColumnFromChange("id", ch -> ch.getId().toString()),
            createTableColumnFromChange("dataset", ch -> ch.getDataset().getName()),
            createTableColumnFromChange("type", ch -> ch.getType().getType()),
            createTableColumnFromChange("from", ch -> ch.getFromString()),
            createTableColumnFromChange("from_name_cluster_ids",
                    ch -> ncm.getClusters(ch.getFrom()).stream().map(cl -> cl.getId().toString())
                            .collect(Collectors.joining(" and "))),
            createTableColumnFromChange("from_name_clusters",
                    ch -> ncm.getClusters(ch.getFrom()).stream()
                            .map(cl -> cl.getNames().stream().map(n -> n.getFullName())
                                    .collect(Collectors.joining("; ")))
                            .collect(Collectors.joining(" and "))),
            createTableColumnFromChange("to", ch -> ch.getToString()),
            createTableColumnFromChange("to_name_cluster_ids",
                    ch -> ncm.getClusters(ch.getTo()).stream().map(cl -> cl.getId().toString())
                            .collect(Collectors.joining(" and "))),
            createTableColumnFromChange("to_name_clusters",
                    ch -> ncm.getClusters(ch.getTo()).stream()
                            .map(cl -> cl.getNames().stream().map(n -> n.getFullName())
                                    .collect(Collectors.joining("; ")))
                            .collect(Collectors.joining(" and "))),
            createTableColumnFromChange("filter_status",
                    ch -> project.getChangeFilter().test(ch) ? "retained" : "eliminated"),
            createTableColumnFromChange("properties",
                    ch -> ch.getProperties().entrySet().stream()
                            .map(entry -> entry.getKey() + ": " + entry.getValue()).sorted()
                            .collect(Collectors.joining("; "))),
            createTableColumnFromChange("citations", ch -> ch.getCitationStream().map(cit -> cit.getCitation())
                    .sorted().collect(Collectors.joining("; "))));

    dataTableView.getItems().clear();
    dataTableView.getItems().addAll(changes);

    dataTableView.refresh();

    // Fill in status text field.
    statusTextField.setText(dataTableView.getItems().size() + " changes across "
            + changes.stream().map(ch -> ch.getDataset()).distinct().count() + " distinct datasets");
}

From source file:com.ikanow.aleph2.management_db.controllers.actors.TestBucketDeletionActor.java

@Test
public void test_bucketDeletionActor_purge_immediate() throws Exception {
    final Tuple2<String, ActorRef> host_actor = insertActor(TestActor_Accepter.class);

    final DataBucketBean bucket = createBucketInfrastructure("/test/purge/immediate", true);

    storeBucketAndStatus(bucket, true, host_actor._1());

    final ManagementFuture<Boolean> res = _core_mgmt_db.purgeBucket(bucket, Optional.empty());

    // check result
    assertTrue("Purge call succeeded: " + res.getManagementResults().get().stream().map(msg -> msg.message())
            .collect(Collectors.joining(";")), res.get());
    assertEquals(3, res.getManagementResults().get().size());

    //check system state afterwards

    // Full filesystem exists
    assertTrue("The file path has *not* been deleted", new File(System.getProperty("java.io.tmpdir")
            + File.separator + "data" + File.separator + bucket.full_name() + "/managed_bucket").exists());

    // Data directories no longer exist
    assertFalse("The data path has been deleted",
            new File(System.getProperty("java.io.tmpdir") + File.separator + "data" + File.separator
                    + bucket.full_name() + IStorageService.STORED_DATA_SUFFIX_PROCESSED + "/test").exists());

    // check state directory _not_ cleaned in this case (the harvester can always do this once that's been wired up):
    checkStateDirectoriesNotCleaned(bucket);

    // check mock index deleted:
    assertEquals(1, _mock_index._handleBucketDeletionRequests.size());
    final Collection<Tuple2<String, Object>> deletions = _mock_index._handleBucketDeletionRequests
            .get("handleBucketDeletionRequest");
    assertEquals(1, deletions.size());
    assertEquals("/test/purge/immediate", deletions.iterator().next()._1());
    assertEquals(false, deletions.iterator().next()._2());
    _mock_index._handleBucketDeletionRequests.clear();

    shutdownActor(host_actor._2());
}

From source file:io.bitsquare.gui.util.BSFormatter.java

public String arbitratorAddressesToString(List<NodeAddress> nodeAddresses) {
    return nodeAddresses.stream().map(NodeAddress::getFullAddress).collect(Collectors.joining(", "));
}