Example usage for java.lang.String.join

List of usage examples for java.lang.String.join

Introduction

On this page you can find example usage for java.lang.String.join.

Prototype

public static String join(CharSequence delimiter, Iterable<? extends CharSequence> elements) 

Document

Returns a new String composed of copies of the CharSequence elements joined together with a copy of the specified delimiter.
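
For reference, a minimal sketch of a direct call (using a plain list of strings):

import java.util.Arrays;
import java.util.List;

List<String> parts = Arrays.asList("alpha", "beta", "gamma");
String joined = String.join(", ", parts); // -> "alpha, beta, gamma"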

Usage

From source file: ai.grakn.graql.GraqlShell.java

/**
 * load the user's preferred editor to edit a query
 * @return the string written to the editor
 */
private String runEditor() throws IOException {
    // Get preferred editor
    Map<String, String> env = System.getenv();
    String editor = Optional.ofNullable(env.get("EDITOR")).orElse(DEFAULT_EDITOR);

    // Run the editor, pipe input into and out of tty so we can provide the input/output to the editor via Graql
    ProcessBuilder builder = new ProcessBuilder("/bin/bash", "-c",
            editor + " </dev/tty >/dev/tty " + tempFile.getAbsolutePath());

    // Wait for user to finish editing
    try {
        builder.start().waitFor();
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }

    return String.join("\n", Files.readAllLines(tempFile.toPath()));
}
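
Files.readAllLines strips the line terminators, so joining with "\n" is what reassembles the edited file into a single string. A minimal sketch of the same idiom (the path below is hypothetical):

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

Path tempFile = Paths.get("/tmp/query.gql"); // hypothetical temp file; readAllLines may throw IOException
String contents = String.join("\n", Files.readAllLines(tempFile)); // rejoin the lines with newlines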

From source file: edu.harvard.iq.dataverse.datasetutility.AddReplaceFileHelper.java

/**
 * Get the error messages as a single string.
 *
 * @param joinString the delimiter to join with; defaults to "\n" when null
 * @return the joined error messages
 */
public String getErrorMessagesAsString(String joinString) {
    if (joinString == null) {
        joinString = "\n";
    }
    return String.join(joinString, this.errorMessages);
}
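
A hypothetical call showing the default newline delimiter (the messages are made up for illustration):

import java.util.Arrays;
import java.util.List;

List<String> errorMessages = Arrays.asList("File too large.", "Unsupported file type."); // hypothetical messages
String report = String.join("\n", errorMessages); // -> "File too large.\nUnsupported file type."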

From source file: org.elasticsearch.client.RequestConvertersTests.java

public void testDeleteIndex() {
    String[] indices = randomIndicesNames(0, 5);
    DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indices);

    Map<String, String> expectedParams = new HashMap<>();
    setRandomTimeout(deleteIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
    setRandomMasterTimeout(deleteIndexRequest, expectedParams);

    setRandomIndicesOptions(deleteIndexRequest::indicesOptions, deleteIndexRequest::indicesOptions,
            expectedParams);

    Request request = RequestConverters.deleteIndex(deleteIndexRequest);
    assertEquals("/" + String.join(",", indices), request.getEndpoint());
    assertEquals(expectedParams, request.getParameters());
    assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
    assertNull(request.getEntity());
}

From source file: com.vmware.photon.controller.deployer.xenon.workflow.BatchCreateManagementWorkflowService.java

private String logUnconvergedServices(Map<Long, Operation> os) {
    List<String> strings = new ArrayList<>();
    for (Operation o : os.values()) {
        NodeGroupState nodeGroupState = o.getBody(NodeGroupState.class);
        if (!serviceAvailable(nodeGroupState)) {
            strings.add(nodeGroupState.nodes.values().iterator().next().groupReference.toString());
        }
    }
    return String.join(",", strings);
}

From source file: ee.ria.xroad.common.util.CryptoUtils.java

/**
 * Calculates the digest of the certificate and encodes it as uppercase hex, inserting the given delimiter every 2 characters.
 * @param cert the certificate
 * @param delimiter the delimiter to use
 * @return calculated certificate hex hash String
 * @throws Exception if any errors occur
 */
public static String calculateDelimitedCertHexHash(X509Certificate cert, String delimiter) throws Exception {
    return String.join(delimiter, Splitter.fixedLength(2).split(calculateCertHexHash(cert).toUpperCase()));
}
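
The Iterable overload accepts Guava's Splitter output directly, which is what formats the fingerprint here. A minimal sketch with an assumed, precomputed hex digest:

import com.google.common.base.Splitter;

String hexHash = "1a2b3c4d"; // assumed precomputed digest
String fingerprint = String.join(":", Splitter.fixedLength(2).split(hexHash.toUpperCase())); // -> "1A:2B:3C:4D"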

From source file: com.searchcode.app.jobs.IndexGitRepoJob.java

private List<CodeOwner> getBlameInfo(int codeLinesSize, String repoName, String repoLocations,
        String fileName) {
    List<CodeOwner> codeOwners = new ArrayList<>(codeLinesSize);
    try {
        // The / part is required due to a CentOS bug in version 1.1.1
        // This appears to be correct
        String repoLoc = repoLocations + "/" + repoName + "/.git";

        Repository localRepository = new FileRepository(new File(repoLoc));
        BlameCommand blamer = new BlameCommand(localRepository);

        ObjectId commitID = localRepository.resolve("HEAD");

        if (commitID == null) {
            Singleton.getLogger().info("getBlameInfo commitID is null for " + repoLoc + " " + fileName);
            return codeOwners;
        }

        BlameResult blame;

        // Something in here appears to be wrong...
        blamer.setStartCommit(commitID);
        blamer.setFilePath(fileName);
        blame = blamer.call();

        // Hail-mary attempt to solve the issue on CentOS; attempt to set it at all costs
        if (blame == null) { // This one appears to solve the issue so don't remove it
            String[] split = fileName.split("/");
            blamer.setStartCommit(commitID);
            if (split.length != 1) {
                blamer.setFilePath(String.join("/", Arrays.asList(split).subList(1, split.length)));
            }
            blame = blamer.call();
        }
        if (blame == null) {
            String[] split = fileName.split("/");
            blamer.setStartCommit(commitID);
            if (split.length != 1) {
                blamer.setFilePath("/" + String.join("/", Arrays.asList(split).subList(1, split.length)));
            }
            blame = blamer.call();
        }

        if (blame == null) {
            Singleton.getLogger().info("getBlameInfo blame is null for " + repoLoc + " " + fileName);
        }

        if (blame != null) {
            // Get all the owners, their number of commits, and their most recent commit
            HashMap<String, CodeOwner> owners = new HashMap<>();
            RevCommit commit;
            PersonIdent authorIdent;

            try {
                for (int i = 0; i < codeLinesSize; i++) {
                    commit = blame.getSourceCommit(i);
                    authorIdent = commit.getAuthorIdent();

                    if (owners.containsKey(authorIdent.getName())) {
                        CodeOwner codeOwner = owners.get(authorIdent.getName());
                        codeOwner.incrementLines();

                        int timestamp = codeOwner.getMostRecentUnixCommitTimestamp();

                        if (commit.getCommitTime() > timestamp) {
                            codeOwner.setMostRecentUnixCommitTimestamp(commit.getCommitTime());
                        }
                        owners.put(authorIdent.getName(), codeOwner);
                    } else {
                        owners.put(authorIdent.getName(),
                                new CodeOwner(authorIdent.getName(), 1, commit.getCommitTime()));
                    }
                }
            } catch (IndexOutOfBoundsException ex) {
                // Ignore this, as it's not really a problem... or is it?
                Singleton.getLogger()
                        .info("IndexOutOfBoundsException when trying to get blame for " + repoName + " " + fileName);
            }

            codeOwners = new ArrayList<>(owners.values());
        }

    } catch (IOException e) {
        e.printStackTrace();
    } catch (GitAPIException e) {
        e.printStackTrace();
    } catch (IllegalArgumentException e) {
        e.printStackTrace();
    }

    System.gc(); // Try to clean up
    return codeOwners;
}

From source file: com.netflix.spinnaker.halyard.deploy.spinnaker.v1.service.distributed.kubernetes.v2.KubernetesV2Service.java

default String connectCommand(AccountDeploymentDetails<KubernetesAccount> details,
        SpinnakerRuntimeSettings runtimeSettings) {
    ServiceSettings settings = runtimeSettings.getServiceSettings(getService());
    KubernetesAccount account = details.getAccount();
    String namespace = settings.getLocation();
    String name = getServiceName();
    int port = settings.getPort();

    String podNameCommand = String.join(" ",
            KubernetesV2Utils.kubectlPodServiceCommand(account, namespace, name));

    return String.join(" ",
            KubernetesV2Utils.kubectlConnectPodCommand(account, namespace, "$(" + podNameCommand + ")", port));
}
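
Both joins simply glue command tokens back into a shell-ready string; the real token lists come from KubernetesV2Utils. A minimal sketch with hypothetical tokens:

import java.util.Arrays;
import java.util.List;

List<String> tokens = Arrays.asList("kubectl", "-n", "spinnaker", "get", "pods"); // hypothetical command tokens
String command = String.join(" ", tokens); // -> "kubectl -n spinnaker get pods"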

From source file: com.ggvaidya.scinames.ui.DataReconciliatorController.java

private void reconcileDataFromOneDataset() {
    Project project = dataReconciliatorView.getProjectView().getProject();
    String reconciliationMethod = reconcileUsingComboBox.getValue();
    Table<String, String, Set<String>> precalc = HashBasedTable.create();

    Dataset namesDataset = useNamesFromComboBox.getSelectionModel().getSelectedItem();
    List<NameCluster> nameClusters = null;
    List<Name> namesInDataset = null;

    // Set up namesInDataset.
    switch (namesToUseComboBox.getValue()) {
    case USE_NAMES_IN_DATASET_ROWS:
        if (namesDataset == ALL) {
            namesInDataset = project.getDatasets().stream().flatMap(ds -> ds.getNamesInAllRows().stream())
                    .distinct().sorted().collect(Collectors.toList());
        } else {
            namesInDataset = namesDataset.getNamesInAllRows().stream().sorted().distinct()
                    .collect(Collectors.toList());
        }
        break;

    case USE_ALL_REFERENCED_NAMES:
        if (namesDataset == ALL) {
            namesInDataset = project.getDatasets().stream().flatMap(ds -> ds.getReferencedNames()).distinct()
                    .sorted().collect(Collectors.toList());
        } else {
            namesInDataset = namesDataset.getReferencedNames().sorted().collect(Collectors.toList());
        }

        break;

    case USE_ALL_RECOGNIZED_NAMES:
        if (namesDataset == ALL) {
            namesInDataset = project.getDatasets().stream()
                    .flatMap(ds -> project.getRecognizedNames(ds).stream()).distinct().sorted()
                    .collect(Collectors.toList());
        } else {
            namesInDataset = project.getRecognizedNames(namesDataset).stream().sorted()
                    .collect(Collectors.toList());
        }

        break;
    }

    // IMPORTANT NOTE
    // This algorithm now relies on nameClusters and namesInDataset
    // having EXACTLY the same size. So please make sure every combination
    // of logic here lines up exactly.

    boolean flag_nameClustersAreTaxonConcepts = false;
    switch (reconciliationMethod) {
    case RECONCILE_BY_NAME:
        // namesInDataset already has all the names we want.

        nameClusters = createSingleNameClusters(namesDataset, namesInDataset);

        break;

    case RECONCILE_BY_SPECIES_NAME:
        namesInDataset = namesInDataset.stream().filter(n -> n.hasSpecificEpithet())
                .flatMap(n -> n.asBinomial()).distinct().sorted().collect(Collectors.toList());

        nameClusters = createSingleNameClusters(namesDataset, namesInDataset);

        break;

    case RECONCILE_BY_SPECIES_NAME_CLUSTER:
        // nameClusters = project.getNameClusterManager().getSpeciesClustersAfterFiltering(project).collect(Collectors.toList());

        namesInDataset = namesInDataset.stream().filter(n -> n.hasSpecificEpithet())
                .flatMap(n -> n.asBinomial()).distinct().sorted().collect(Collectors.toList());

        nameClusters = project.getNameClusterManager().getClusters(namesInDataset);

        break;

    case RECONCILE_BY_NAME_CLUSTER:
        // Note that this includes genus name clusters!
        nameClusters = project.getNameClusterManager().getClusters(namesInDataset);

        break;

    case RECONCILE_BY_SPECIES_TAXON_CONCEPT:
        /*
         * WARNING: untested! Please test before using!
         */

        List<NameCluster> nameClustersByName = project.getNameClusterManager().getClusters(namesInDataset);

        List<Name> namesInDatasetCorresponding = new LinkedList<>();
        List<NameCluster> nameClustersCorresponding = new LinkedList<>();

        for (int x = 0; x < namesInDataset.size(); x++) {
            Name name = namesInDataset.get(x);
            NameCluster nameCluster = nameClustersByName.get(x);
            List<TaxonConcept> taxonConcepts;

            if (nameCluster == null) {
                taxonConcepts = new ArrayList<>();
            } else {
                taxonConcepts = nameCluster.getTaxonConcepts(project);
            }

            // Now we need to unwind this data structure: each entry in nameClusters  
            // should have a corresponding entry in namesInDataset.
            for (TaxonConcept tc : taxonConcepts) {
                namesInDatasetCorresponding.add(name);
                nameClustersCorresponding.add((NameCluster) tc);
            }
        }

        // All good? Let's swap in those variables to replace their actual counterparts.
        namesInDataset = namesInDatasetCorresponding;
        nameClusters = nameClustersCorresponding;

        // This is special, at least for now. Maybe some day it won't be?
        flag_nameClustersAreTaxonConcepts = true;

        break;

    default:
        LOGGER.log(Level.SEVERE, "Reconciliation method ''{0}'' has not yet been implemented!",
                reconciliationMethod);
        return;
    }

    if (nameClusters == null) {
        dataTableView.setItems(FXCollections.emptyObservableList());
        return;
    }

    LOGGER.info("Name clusters ready to display: " + nameClusters.size() + " clusters");
    LOGGER.info("Based on " + namesInDataset.size() + " names from " + namesDataset + ": " + namesInDataset);

    // What columns do we have from the other dataset?
    Dataset dataDataset = includeDataFromComboBox.getSelectionModel().getSelectedItem();
    List<Dataset> datasets = null;
    if (dataDataset == ALL)
        datasets = project.getDatasets();
    else if (dataDataset == NONE)
        datasets = new ArrayList<>();
    else
        datasets = Arrays.asList(dataDataset);

    // Precalculate.
    List<String> existingColNames = new ArrayList<>();
    existingColNames.add("id");
    existingColNames.add("name");
    existingColNames.add("names_in_dataset");
    existingColNames.add("all_names_in_cluster");
    existingColNames.add("dataset_rows_for_name");
    existingColNames.add("name_cluster_id");
    // existingColNames.add("distinct_dataset_rows_for_name");

    // If these are taxon concepts, there are three other columns we want
    // to emit.
    if (flag_nameClustersAreTaxonConcepts) {
        existingColNames.add("starts_with");
        existingColNames.add("ends_with");
        existingColNames.add("is_ongoing");
    } else {
        existingColNames.add("taxon_concept_count");
        existingColNames.add("taxon_concepts");
        existingColNames.add("trajectory");
        existingColNames.add("trajectory_without_renames");
        existingColNames.add("trajectory_lumps_splits");
    }

    existingColNames.add("first_added_dataset");
    existingColNames.add("first_added_year");

    existingColNames.add("reconciliation_duplicate_of");

    // Precalculate all dataset rows.
    Map<Name, Set<DatasetRow>> datasetRowsByName = new HashMap<>();
    for (Dataset ds : datasets) {
        Map<Name, Set<DatasetRow>> rowsByName = ds.getRowsByName();

        // Merge into the main list.
        for (Name n : rowsByName.keySet()) {
            Set<DatasetRow> rows = rowsByName.get(n);

            if (!reconciliationMethod.equals(RECONCILE_BY_NAME)) {
                // If we're reconciling by binomial names, then
                // we should include binomial names for each row, too.
                Optional<Name> binomialName = n.asBinomial().findAny();
                if (binomialName.isPresent()) {
                    Set<DatasetRow> rowsForBinomial = rowsByName.get(binomialName.get());
                    if (rowsForBinomial != null)
                        rows.addAll(rowsForBinomial);

                    // Don't write this to the sub-binomial name,
                    // just write to the binomial name.
                    n = binomialName.get();
                }
            }

            if (!datasetRowsByName.containsKey(n))
                datasetRowsByName.put(n, new HashSet<>());

            datasetRowsByName.get(n).addAll(rows);
        }
    }

    LOGGER.info("Precalculating all dataset rows");

    // Finally, come up with unique names for every dataset we might have.
    Map<DatasetColumn, String> datasetColumnMap = new HashMap<>();

    existingColNames.addAll(datasets.stream().flatMap(ds -> ds.getColumns().stream()).distinct().map(col -> {
        String colName = col.getName();
        String baseName = colName;

        int uniqueCounter = 0;
        while (existingColNames.contains(colName)) {
            // Duplicate column name! Map it elsewhere.
            uniqueCounter++;
            colName = baseName + "." + uniqueCounter;
        }

        // Where did we map it to?
        datasetColumnMap.put(col, colName);

        // Okay, now return the new column name we need to create.
        return colName;
    }).collect(Collectors.toList()));

    LOGGER.info("Precalculating " + nameClusters.size() + " name clusters");

    // Make sure names and name clusters are unique, otherwise bail.
    // Earlier this was being ensured by keeping namesInDataset as a
    // Set, but since it's a List now, duplicates might sneak in.
    assert (namesInDataset.size() == new HashSet<>(namesInDataset).size());

    // Since it's a list, we can set it up so that it always corresponds to
    // the correct name cluster.
    assert (namesInDataset.size() == nameClusters.size());

    // Now, nameClusters should NOT be de-duplicated: we might have the same
    // cluster appear multiple times! If so, we'll set 
    // "reconciliation_duplicate_of" to point to the first reconciliation,
    // so we don't duplicate reconciliations.

    // Let's track which IDs we use for duplicated name clusters.
    Map<NameCluster, List<String>> idsForNameClusters = new HashMap<>();

    if (nameClusters.size() != new HashSet<>(nameClusters).size()) {

        LOGGER.warning("Clusters not unique: " + nameClusters.size() + " clusters found, but only "
                + new HashSet<>(nameClusters).size() + " are unique.");
    }

    // Track duplicates.
    Map<NameCluster, List<String>> clusterIDsPerNameCluster = new HashMap<>();

    int totalClusterCount = nameClusters.size();
    int currentClusterCount = 0;
    List<String> nameClusterIDs = new LinkedList<>();
    for (NameCluster cluster : nameClusters) {
        currentClusterCount++;

        // Probably don't need GUIDs here, right?
        String clusterID = String.valueOf(currentClusterCount);
        nameClusterIDs.add(clusterID);

        LOGGER.info("(" + currentClusterCount + "/" + totalClusterCount + ") Precalculating name cluster: "
                + cluster);

        precalc.put(clusterID, "id", getOneElementSet(clusterID));
        precalc.put(clusterID, "name_cluster_id", getOneElementSet(cluster.getId().toString()));

        // The 'name' should come from namesInDataset.
        precalc.put(clusterID, "name",
                getOneElementSet(namesInDataset.get(currentClusterCount - 1).getFullName()));

        // Okay, here's what we need to do:
        //   - If names is ALL, then we can't do better than cluster.getName().
        if (namesDataset == ALL) {
            precalc.put(clusterID, "names_in_dataset",
                    cluster.getNames().stream().map(n -> n.getFullName()).collect(Collectors.toSet()));
        } else {
            // hey, here's something cool we can do: figure out which name(s)
            // this dataset uses from this cluster!
            Set<Name> namesToFilterTo = new HashSet<>(namesInDataset);

            List<String> namesInCluster = cluster.getNames().stream().filter(n -> namesToFilterTo.contains(n))
                    .map(n -> n.getFullName()).collect(Collectors.toList());

            precalc.put(clusterID, "names_in_dataset", new HashSet<>(namesInCluster));
        }

        precalc.put(clusterID, "all_names_in_cluster",
                cluster.getNames().stream().map(n -> n.getFullName()).collect(Collectors.toSet()));

        // Is this a duplicate?
        if (clusterIDsPerNameCluster.containsKey(cluster)) {
            List<String> duplicatedRows = clusterIDsPerNameCluster.get(cluster);

            // Only the first one should have the actual data.

            precalc.put(clusterID, "reconciliation_duplicate_of", getOneElementSet(duplicatedRows.get(0)));
            duplicatedRows.add(clusterID);

            // Okay, do no other work on this cluster, since all the actual information is
            // in the other entry.
            continue;

        } else {
            precalc.put(clusterID, "reconciliation_duplicate_of", getOneElementSet("NA"));

            List<String> clusterIds = new LinkedList<>();
            clusterIds.add(clusterID);
            clusterIDsPerNameCluster.put(cluster, clusterIds);
        }

        LOGGER.fine("Cluster calculation began for " + cluster);

        // If it's a taxon concept, precalculate a few more columns.
        if (flag_nameClustersAreTaxonConcepts) {
            TaxonConcept tc = (TaxonConcept) cluster;

            precalc.put(clusterID, "starts_with",
                    tc.getStartsWith().stream().map(ch -> ch.toString()).collect(Collectors.toSet()));
            precalc.put(clusterID, "ends_with",
                    tc.getEndsWith().stream().map(ch -> ch.toString()).collect(Collectors.toSet()));
            precalc.put(clusterID, "is_ongoing", getOneElementSet(tc.isOngoing(project) ? "yes" : "no"));
        } else {
            // If it's a true name cluster, then perhaps people will want
            // to know what taxon concepts are in here? Maybe for some sort
            // of PhD?
            List<TaxonConcept> tcs = cluster.getTaxonConcepts(project);

            precalc.put(clusterID, "taxon_concept_count", getOneElementSet(String.valueOf(tcs.size())));
            precalc.put(clusterID, "taxon_concepts",
                    tcs.stream().map(tc -> tc.toString()).collect(Collectors.toSet()));
        }

        LOGGER.fine("Cluster calculation ended for " + cluster);

        // When was this first added?
        List<Dataset> foundInSorted = cluster.getFoundInSortedWithDates();
        if (!foundInSorted.isEmpty()) {
            precalc.put(clusterID, "first_added_dataset", getOneElementSet(foundInSorted.get(0).getCitation()));
            precalc.put(clusterID, "first_added_year",
                    getOneElementSet(foundInSorted.get(0).getDate().getYearAsString()));
        }

        LOGGER.fine("Trajectory began for " + cluster);

        // For name clusters we can also figure out trajectories!
        if (!flag_nameClustersAreTaxonConcepts) {
            List<String> trajectorySteps = cluster.getFoundInSortedWithDates().stream().map(dataset -> {
                String changes = dataset.getChanges(project).filter(ch -> cluster.containsAny(ch.getAllNames()))
                        .map(ch -> ch.getType().toString()).collect(Collectors.joining("|"));
                if (!changes.isEmpty())
                    return changes;

                // This can happen when a change is referenced without an explicit addition.
                if (cluster.containsAny(dataset.getReferencedNames().collect(Collectors.toList())))
                    return "referenced";
                else
                    return "missing";
            }).collect(Collectors.toList());

            precalc.put(clusterID, "trajectory", getOneElementSet(String.join(" -> ", trajectorySteps)));

            precalc.put(clusterID, "trajectory_without_renames", getOneElementSet(trajectorySteps.stream()
                    .filter(ch -> !ch.contains("rename")).collect(Collectors.joining(" -> "))));

            precalc.put(clusterID, "trajectory_lumps_splits",
                    getOneElementSet(
                            trajectorySteps.stream().filter(ch -> ch.contains("split") || ch.contains("lump"))
                                    .collect(Collectors.joining(" -> "))));
        }

        LOGGER.fine("Trajectory ended for " + cluster);

        // Okay, here's where we reconcile!
        LOGGER.fine("Reconciliation began for " + cluster);

        // Now we need to actually reconcile the data from these unique row objects.
        Set<DatasetRow> allDatasetRowsCombined = new HashSet<>();

        for (Name name : cluster.getNames()) {
            // We don't have to convert cluster names to binomial,
            // because the cluster formation -- or the hacky thing we do
            // for RECONCILE_SPECIES_NAME -- should already have done that!
            //
            // Where necessary, the previous code will automatically
            // set up datasetRowsByName so it matched binomial names.
            Set<DatasetRow> rowsToReconcile = datasetRowsByName.get(name);
            if (rowsToReconcile == null)
                continue;

            allDatasetRowsCombined.addAll(rowsToReconcile);

            Set<DatasetColumn> columns = rowsToReconcile.stream().flatMap(row -> row.getColumns().stream())
                    .collect(Collectors.toSet());

            for (DatasetColumn col : columns) {
                // We've precalculated column names.
                String colName = datasetColumnMap.get(col);

                // Make sure we get this column down into 'precalc'. 
                if (!precalc.contains(clusterID, colName))
                    precalc.put(clusterID, colName, new HashSet<>());

                // Add all values for all rows in this column.
                Set<String> vals = rowsToReconcile.stream().flatMap(row -> {
                    if (!row.hasColumn(col))
                        return Stream.empty();
                    else
                        return Stream.of(row.get(col));
                }).collect(Collectors.toSet());

                precalc.get(clusterID, colName).addAll(vals);

                LOGGER.fine("Added " + vals.size() + " rows under name cluster '" + cluster + "'");
            }
        }

        LOGGER.info("(" + currentClusterCount + "/" + totalClusterCount + ") Reconciliation completed for "
                + cluster);

        precalc.put(clusterID, "dataset_rows_for_name", getOneElementSet(allDatasetRowsCombined.size()));
    }

    // Set up table items.
    dataTableView.setItems(FXCollections.observableList(nameClusterIDs));

    LOGGER.info("Setting up columns: " + existingColNames);

    dataTableView.getColumns().clear();
    for (String colName : existingColNames) {
        dataTableView.getColumns().add(createColumnFromPrecalc(colName, precalc));
    }

    // Get distinct column names.
    Stream<String> colNames = precalc.cellSet().stream().map(set -> set.getColumnKey());

    // Eliminate columns that are in the existingColNames.
    colNames = colNames.filter(colName -> !existingColNames.contains(colName));

    // And add tablecolumns for the rest.
    List<TableColumn<String, String>> cols = colNames.distinct().sorted()
            .map(colName -> createColumnFromPrecalc(colName, precalc)).collect(Collectors.toList());
    dataTableView.getColumns().addAll(cols);
    dataTableView.refresh();

    // Fill in status text field.
    long distinctNameCount = precalc.rowKeySet().stream().map(clusterID -> precalc.get(clusterID, "name"))
            .distinct().count();
    String str_duplicates = "";
    if (distinctNameCount != dataTableView.getItems().size()) {
        str_duplicates = " for " + distinctNameCount + " distinct names";
    }

    statusTextField.setText(dataTableView.getItems().size() + " rows across " + cols.size()
            + " reconciled columns" + str_duplicates);

    LOGGER.info("All done!");
}

From source file: org.elasticsearch.client.RequestConvertersTests.java

public void testGetSettings() throws IOException {
    String[] indicesUnderTest = randomBoolean() ? null : randomIndicesNames(0, 5);

    GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices(indicesUnderTest);

    Map<String, String> expectedParams = new HashMap<>();
    setRandomMasterTimeout(getSettingsRequest, expectedParams);
    setRandomIndicesOptions(getSettingsRequest::indicesOptions, getSettingsRequest::indicesOptions,
            expectedParams);

    setRandomLocal(getSettingsRequest, expectedParams);

    if (randomBoolean()) {
        // the request object will not have include_defaults present unless it is set to
        // true
        getSettingsRequest.includeDefaults(randomBoolean());
        if (getSettingsRequest.includeDefaults()) {
            expectedParams.put("include_defaults", Boolean.toString(true));
        }
    }

    StringJoiner endpoint = new StringJoiner("/", "/", "");
    if (indicesUnderTest != null && indicesUnderTest.length > 0) {
        endpoint.add(String.join(",", indicesUnderTest));
    }
    endpoint.add("_settings");

    if (randomBoolean()) {
        String[] names = randomBoolean() ? null : new String[randomIntBetween(0, 3)];
        if (names != null) {
            for (int x = 0; x < names.length; x++) {
                names[x] = randomAlphaOfLengthBetween(3, 10);
            }
        }
        getSettingsRequest.names(names);
        if (names != null && names.length > 0) {
            endpoint.add(String.join(",", names));
        }
    }

    Request request = RequestConverters.getSettings(getSettingsRequest);

    assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
    assertThat(request.getParameters(), equalTo(expectedParams));
    assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME));
    assertThat(request.getEntity(), nullValue());
}
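
The expected endpoint is built by joining the index names with "," and composing the path segments with a StringJoiner. A minimal sketch of the resulting shape (index names are hypothetical):

import java.util.StringJoiner;

String[] indices = { "index-1", "index-2" }; // hypothetical index names
StringJoiner endpoint = new StringJoiner("/", "/", "");
endpoint.add(String.join(",", indices));
endpoint.add("_settings");
// endpoint.toString() -> "/index-1,index-2/_settings"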

From source file: com.searchcode.app.jobs.repository.IndexBaseRepoJob.java

public String getBlameFilePath(String fileLocationFilename) {
    String[] split = fileLocationFilename.split("/");
    return String.join("/", Arrays.asList(split).subList(1, split.length));
}