Example usage for javafx.collections FXCollections observableList

List of usage examples for javafx.collections FXCollections observableList

Introduction

On this page you can find example usage for javafx.collections.FXCollections.observableList.

Prototype

public static <E> ObservableList<E> observableList(List<E> list) 

Source Link

Document

Constructs an ObservableList that is backed by the specified list.

Usage

From source file:com.bdb.weather.display.day.DayRainPane.java

/**
 * Load the data into the plot./*from  w w  w  .  ja  va2s. c  o  m*/
 * 
 * @param hourlyRain The data to load
 * @param records The list of historical records
 */
public void loadData(DayHourRain hourlyRain, List<HistoricalRecord> records) {
    if (hourlyRain == null) {
        dataTable.setItems(null);
        rainPlot.setDataset(null);
        return;
    }

    List<RainItem> rows = new ArrayList<>();
    DefaultCategoryDataset rainDataset = new DefaultCategoryDataset();

    int n = 0;

    Set<Integer> hours = hourlyRain.getHourValues();
    for (int hour : hours) {
        String label = buildHourLabel(hour);
        rainDataset.addValue(hourlyRain.getRain(hour).get(), RAIN_ROW_KEY, label);
        Depth et = new Depth(0.0);
        for (HistoricalRecord record : records) {
            if (record.getTime().getHour() == n && record.getEvapotranspiration() != null)
                et = et.add(record.getEvapotranspiration());
        }

        rainDataset.addValue(et.get(), ET_ROW_KEY, label);

        n++;
        RainItem row = new RainItem(hour, hourlyRain.getRain(hour), et);
        rows.add(row);
    }
    dataTable.setItems(FXCollections.observableList(rows));

    rainPlot.setDataset(rainDataset);
}

From source file:app.order.OrderController.java

/** Refreshes the check table from the checks on the given order's bill. */
public void updateCheckTable(Order order) {
    // Cache the observable wrapper in the field, then hand it to the table.
    checks = FXCollections.observableList(order.getBill().getChecks());
    checkTable.setItems(checks);
}

From source file:com.bdb.weather.display.day.DayXYPlotPane.java

/**
 * Load the data into the JFreeChart time series and into the table model.
 *
 * @param records The list of historical records
 */
protected void loadDataSeries(List<HistoricalRecord> records) {
    // Start every series from a clean slate before repopulating.
    entries.forEach((entry) -> entry.timeSeries.clear());

    dataTable.setItems(FXCollections.observableList(records));

    getPlot().getRangeAxis().setAutoRange(true);

    records.forEach((record) -> {
        // Each record contributes one data point per series, keyed by its minute.
        RegularTimePeriod period = RegularTimePeriod.createInstance(Minute.class,
                TimeUtils.localDateTimeToDate(record.getTime()), TimeZone.getDefault());

        entries.forEach((entry) -> {
            Measurement measurement = entry.seriesInfo.getValue(record);
            if (measurement != null) {
                entry.timeSeries.add(period, measurement.get());
            }
        });
    });

    displaySeries(datasetLeft, datasetRight);
}

From source file:com.esri.geoevent.test.performance.ui.ReportOptionsController.java

/** @return the report formats the user may choose from (Excel workbook or CSV) */
private ObservableList<ReportType> getReportTypes() {
    ArrayList<ReportType> reportTypes = new ArrayList<ReportType>();
    reportTypes.add(ReportType.XLSX);
    reportTypes.add(ReportType.CSV);
    return FXCollections.observableList(reportTypes);
}

From source file:com.esri.geoevent.test.performance.ui.ReportOptionsController.java

/**
 * Splits the report columns between the "available" and "selected" lists shown in the UI.
 * Falls back to the writer's default column set when no selection is supplied.
 *
 * @param selectedColumnsList the columns the user has chosen; {@code null} or empty selects the defaults
 */
private void updateSelectedReportColumns(List<String> selectedColumnsList) {
    List<String> currentSelection = selectedColumnsList;
    if (currentSelection == null || currentSelection.isEmpty()) {
        currentSelection = new ArrayList<String>(AbstractFileRollOverReportWriter.getDefaultColumnNames());
    }

    // The "all columns" list shows only the columns not currently selected.
    List<String> allColumnsList = new ArrayList<String>(AbstractFileRollOverReportWriter.getAllColumnNames());
    allColumnsList.removeAll(currentSelection);
    allColumns.setItems(FXCollections.observableList(allColumnsList));

    // Update the selected list.
    selectedColumns.setItems(FXCollections.observableList(currentSelection));
}

From source file:com.loop81.fxcomparer.FXComparerController.java

/** 
 * Run the compare by embedding the call to {@link Comparer#compare(ComparableArchive, ComparableArchive)} into
 * a {@link Task} since the operation might take some time depending on the client machine and the archives size
 * and we do not like to hang the UI-thread. 
 */
private void initCompare() {
    new Thread(new Task<ComparisonResult>() {
        @Override
        protected ComparisonResult call() throws Exception {
            // Runs on the background thread; may be slow for large archives.
            return comparer.compare(archive1, archive2);
        }

        @Override
        protected void succeeded() {
            // Task callback invoked after call() returns normally; safe for UI updates.
            ComparisonResult result = getValue();
            if (result.isSame()) {
                labelCompareResult.setText(MessageBundle.getString("result.same"));
            } else {
                // Positive diff means archive2 is larger than archive1.
                long diff = archive2.getSize() - archive1.getSize();
                labelCompareResult.setText(MessageBundle.getString("result.different",
                        FileUtils.byteCountToDisplaySize(archive1.getSize()),
                        FileUtils.byteCountToDisplaySize(archive2.getSize()),
                        convertdifferenceToReadableString(diff)));
            }

            compareTable.setItems(FXCollections.observableList(result.getEntries()));
        }

        @Override
        protected void failed() {
            // Surface the failure to the user; also dump the cause for debugging.
            new AlertDialog(MessageBundle.getString("exceptions.archive.compare")).show();
            getException().printStackTrace();
        }
    }).start();
}

From source file:com.ggvaidya.scinames.ui.DatasetDiffController.java

/** Rebuilds the comparison table's columns and items from the given dataset rows. */
private void displayRows(List<DatasetRow> rows) {
    ObservableList<TableColumn> tableColumns = comparisonTableView.getColumns();
    tableColumns.clear();

    // One table column per distinct dataset column seen across all rows.
    rows.stream()
            .flatMap(row -> row.getColumns().stream())
            .distinct()
            .forEach(datasetColumn -> tableColumns
                    .add(createTableColumnForDatasetColumn(datasetColumn.getName(), datasetColumn)));

    // Prefix the by-unique column before the data columns.
    Function<DatasetRow, String> uniqueMap = getByUniqueMap();
    tableColumns.add(0, createTableColumnForDatasetRow("Unique", row -> truncateString(uniqueMap.apply(row), 30)));

    // Suffix the source-dataset column after the data columns.
    tableColumns.add(createTableColumnForDatasetRow("Dataset", row -> row.getDataset().getCitation()));

    comparisonTableView.setItems(FXCollections.observableList(rows));
    statusTextField.setText("Displaying " + rows.size() + " rows across " + tableColumns.size() + " columns");
}

From source file:com.ggvaidya.scinames.dataset.DatasetSceneController.java

/** Replaces the table's items with all changes from the dataset, preserving sort order and selection. */
private void fillTableWithChanges(TableView<Change> tv, Dataset tp) {
    // Snapshot the sort order and selection so they survive the item swap.
    List<TableColumn<Change, ?>> previousSortOrder = new LinkedList<>(tv.getSortOrder());
    List<Change> previousSelection = new LinkedList<>(tv.getSelectionModel().getSelectedItems());

    LOGGER.info("About to set changes table items: sortByCols = " + previousSortOrder + ", selectedChanges = "
            + previousSelection);
    tv.setItems(FXCollections.observableList(tp.getAllChangesAsList()));
    LOGGER.info("tv.setItems() completed");

    // Restore the selection and the sort order on the new items.
    previousSelection.forEach(change -> tv.getSelectionModel().select(change));
    tv.getSortOrder().addAll(previousSortOrder);
    LOGGER.info("fillTableWithChanges() completed");
}

From source file:com.ggvaidya.scinames.complexquery.ComplexQueryViewController.java

/**
 * Rebuilds the data table so each row is one name cluster and each column is either a
 * precalculated summary field or a data column drawn from the datasets the cluster appears in.
 *
 * @param project the project the clusters belong to
 * @param nameClusters the clusters to display; {@code null} clears the table
 * @param datasets the datasets whose rows are reconciled against the clusters
 */
public void updateTableWithNameClusters(Project project, List<NameCluster> nameClusters,
        List<Dataset> datasets) {
    // (cluster, column name) -> set of cell values; filled in below, rendered at the end.
    Table<NameCluster, String, Set<String>> precalc = HashBasedTable.create();

    if (nameClusters == null) {
        dataTableView.setItems(FXCollections.emptyObservableList());
        return;
    }
    boolean flag_nameClustersAreTaxonConcepts = false;

    // Decide once, from the first element, whether the clusters are taxon concepts.
    if (nameClusters.size() > 0 && TaxonConcept.class.isAssignableFrom(nameClusters.get(0).getClass()))
        flag_nameClustersAreTaxonConcepts = true;
    dataTableView.setItems(FXCollections.observableList(nameClusters));

    // Precalculate.
    List<String> existingColNames = new ArrayList<>();
    existingColNames.add("id");
    existingColNames.add("name");
    existingColNames.add("names_in_dataset");
    existingColNames.add("all_names_in_cluster");

    // If these are taxon concepts, there's three other columns we want
    // to emit.
    if (flag_nameClustersAreTaxonConcepts) {
        existingColNames.add("name_cluster_id");
        existingColNames.add("starts_with");
        existingColNames.add("ends_with");
        existingColNames.add("is_ongoing");
    } else {
        existingColNames.add("taxon_concept_count");
        existingColNames.add("taxon_concepts");
    }

    // Set<Name> recognizedNamesInDataset = namesDataset.getRecognizedNames(project).collect(Collectors.toSet());

    for (NameCluster cluster : nameClusters) {
        precalc.put(cluster, "id", getOneElementSet(cluster.getId().toString()));

        // Okay, here's what we need to do:
        //   - If names is ALL, then we can't do better than cluster.getName().
        // if(namesDataset == ALL) {
        precalc.put(cluster, "names_in_dataset",
                cluster.getNames().stream().map(n -> n.getFullName()).collect(Collectors.toSet()));
        precalc.put(cluster, "name", getOneElementSet(cluster.getName().getFullName()));
        //} else {
        /*
           // hey, here's something cool we can do: figure out which name(s)
           // this dataset uses from this cluster!
           List<String> namesInDataset = cluster.getNames().stream()
              .filter(n -> recognizedNamesInDataset.contains(n))
              .map(n -> n.getFullName())
              .collect(Collectors.toList());
           String firstName = "";
           if(namesInDataset.size() > 0)
              firstName = namesInDataset.get(0);
                   
           precalc.put(cluster, "names_in_dataset", new HashSet<>(namesInDataset));
           precalc.put(cluster, "name", getOneElementSet(firstName));            
        }*/

        precalc.put(cluster, "all_names_in_cluster",
                cluster.getNames().stream().map(n -> n.getFullName()).collect(Collectors.toSet()));

        // If it's a taxon concept, precalculate a few more columns.
        if (flag_nameClustersAreTaxonConcepts) {
            TaxonConcept tc = (TaxonConcept) cluster;

            precalc.put(cluster, "name_cluster_id", getOneElementSet(tc.getNameCluster().getId().toString()));
            precalc.put(cluster, "starts_with",
                    tc.getStartsWith().stream().map(ch -> ch.toString()).collect(Collectors.toSet()));
            precalc.put(cluster, "ends_with",
                    tc.getEndsWith().stream().map(ch -> ch.toString()).collect(Collectors.toSet()));
            precalc.put(cluster, "is_ongoing", getOneElementSet(tc.isOngoing(project) ? "yes" : "no"));
        } else {
            // If it's a true name cluster, then perhaps people will want
            // to know what taxon concepts are in here? Maybe for some sort
            // of PhD?
            List<TaxonConcept> tcs = cluster.getTaxonConcepts(project);

            precalc.put(cluster, "taxon_concept_count", getOneElementSet(String.valueOf(tcs.size())));
            precalc.put(cluster, "taxon_concepts",
                    tcs.stream().map(tc -> tc.toString()).collect(Collectors.toSet()));
        }

        // Okay, here's where we reconcile!
        for (Name n : cluster.getNames()) {
            // TODO: there's probably an optimization here, in which we should
            // loop on the smaller set (either loop on 'datasets' and compare
            // to cluster, or loop on cluster.foundIn and compare to 'datasets').
            for (Dataset ds : datasets) {
                Map<Name, Set<DatasetRow>> rowsByName = ds.getRowsByName();

                // Are we included in this name cluster? If not, skip!
                if (!cluster.getFoundIn().contains(ds))
                    continue;

                // Check to see if we have any rows for this name; if not, skip.
                if (!rowsByName.containsKey(n))
                    continue;

                Set<DatasetRow> matched = rowsByName.get(n);
                LOGGER.log(Level.FINER, "Adding {0} rows under name ''{1}''",
                        new Object[] { matched.size(), n.getFullName() });

                // Group the matched rows by their column sets so we only walk
                // each distinct schema once.
                Map<Set<DatasetColumn>, List<DatasetRow>> rowsByCols = matched.stream()
                        .collect(Collectors.groupingBy((DatasetRow row) -> row.getColumns()));

                for (Set<DatasetColumn> cols : rowsByCols.keySet()) {
                    for (DatasetColumn col : cols) {
                        String colName = col.getName();

                        // Avoid clashing with the precalculated summary columns.
                        if (existingColNames.contains(colName))
                            colName = "datasets." + colName;

                        // NOTE(review): raw HashSet — element type is whatever row.get(col) returns.
                        if (!precalc.contains(cluster, colName))
                            precalc.put(cluster, colName, new HashSet());

                        for (DatasetRow row : rowsByCols.get(cols)) {
                            if (!row.hasColumn(col))
                                continue;

                            precalc.get(cluster, colName).add(row.get(col));
                        }

                        LOGGER.log(Level.FINER, "Added {0} rows under name ''{1}''",
                                new Object[] { rowsByCols.get(cols).size(), n.getFullName() });
                    }
                }
            }
        }
    }

    // Render the fixed summary columns first, in declaration order.
    dataTableView.getColumns().clear();
    for (String colName : existingColNames) {
        dataTableView.getColumns().add(createColumnFromPrecalc(colName, precalc));
    }

    // Get distinct column names.
    Stream<String> colNames = precalc.cellSet().stream().map(set -> set.getColumnKey());

    // Eliminate columns that are in the existingColNames.
    colNames = colNames.filter(colName -> !existingColNames.contains(colName));

    // And add tablecolumns for the rest.
    List<TableColumn<NameCluster, String>> cols = colNames.distinct().sorted()
            .map(colName -> createColumnFromPrecalc(colName, precalc)).collect(Collectors.toList());
    dataTableView.getColumns().addAll(cols);
    dataTableView.refresh();

    // Fill in status text field.
    statusTextField
            .setText(dataTableView.getItems().size() + " rows across " + cols.size() + " reconciled columns");
}

From source file:com.esri.geoevent.test.performance.ui.FixtureController.java

/** @return the test run types the fixture supports: timed, stress, and ramp */
private ObservableList<TestType> getTestTypes() {
    ArrayList<TestType> testTypes = new ArrayList<TestType>();
    testTypes.add(TestType.TIME);
    testTypes.add(TestType.STRESS);
    testTypes.add(TestType.RAMP);
    return FXCollections.observableList(testTypes);
}