List of usage examples for java.util.stream.Collectors.groupingBy
public static <T, K> Collector<T, ?, Map<K, List<T>>> groupingBy(Function<? super T, ? extends K> classifier)
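Before the real-world examples below, a minimal self-contained sketch of what this overload does (hypothetical word list; the classifier maps each element to its key, and elements sharing a key are collected into a List):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupingByDemo {
    public static void main(String[] args) {
        List<String> words = List.of("apple", "avocado", "banana", "cherry", "cranberry");

        // Group words by their first letter; each value is a List of the words
        // that mapped to that key, in encounter order.
        Map<Character, List<String>> byFirstLetter = words.stream()
                .collect(Collectors.groupingBy(w -> w.charAt(0)));

        // e.g. {a=[apple, avocado], b=[banana], c=[cherry, cranberry]}
        // (the returned Map type and its iteration order are unspecified)
        System.out.println(byFirstLetter);
    }
}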
From source file: entity.service.EntryFacadeREST.java
@GET
@Path("startlist/{raceid}/{categoryid}")
@Produces("application/json")
@Asynchronous
public void generateStartlist(@Suspended final AsyncResponse asyncResponse,
        @PathParam("raceid") Integer raceid, @PathParam("categoryid") Integer categoryid) {
    List<Entry> entries = em
            .createQuery("SELECT e "
                    + "FROM Entry e "
                    + "WHERE e.key.raceId = :raceid AND "
                    + "e.status = 'CHECKED' AND "
                    + "e.category.id = :categoryid", Entry.class)
            .setParameter("raceid", raceid)
            .setParameter("categoryid", categoryid)
            .getResultList();

    // Group the checked entries by the contestant's gender; sorting first means each
    // per-gender list keeps race-number order (groupingBy preserves encounter order).
    Map<String, List<Entry>> entriesByGender = entries.stream()
            .sorted((e1, e2) -> e1.getKey().getRacenum().compareTo(e2.getKey().getRacenum()))
            .collect(Collectors.groupingBy(e -> e.getContestant().getGender()));

    if (!entries.isEmpty()) {
        String categoryName = entries.get(0).getCategoryName();
        StartlistExcelDocument startDoc = documentFactory.createStartlist();
        startDoc.withCategoryName(categoryName).withEntries(entriesByGender);
        if (startDoc.generate()) {
            HashMap<String, Object> msgParams = new HashMap<>();
            msgParams.put("filename", startDoc.getFileName());
            // "Dokumentum előállítása sikeres!" = "Document generated successfully!"
            JsonObject jsonMsg = JsonBuilder.getJsonMsg("Dokumentum előállítása sikeres!",
                    JsonBuilder.MsgType.SUCCESS, msgParams);
            asyncResponse.resume(Response.ok(jsonMsg).build());
        } else {
            // "Dokumentum generálási hiba!" = "Document generation error!"
            JsonObject jsonMsg = JsonBuilder.getJsonMsg("Dokumentum generálási hiba!",
                    JsonBuilder.MsgType.ERROR, null);
            asyncResponse.resume(
                    Response.ok(jsonMsg).status(Response.Status.INTERNAL_SERVER_ERROR).build());
        }
    } else {
        // No entries: report the error, so the AsyncResponse is resumed exactly once.
        // "Nincsenek nevezések!" = "There are no entries!"
        JsonObject jsonMsg = JsonBuilder.getJsonMsg("Nincsenek nevezések!",
                JsonBuilder.MsgType.ERROR, null);
        asyncResponse.resume(Response.ok(jsonMsg).build());
    }
}
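The sorted-then-group pattern above relies on groupingBy's default downstream collector (toList), which preserves stream encounter order within each group; a small self-contained illustration:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class SortedGroupsDemo {
    public static void main(String[] args) {
        List<String> names = List.of("bob", "amy", "ann", "ben");

        // Sorting before grouping means each group's List comes out sorted,
        // because groupingBy's default toList() keeps encounter order.
        Map<Character, List<String>> groups = names.stream()
                .sorted()
                .collect(Collectors.groupingBy(n -> n.charAt(0)));

        System.out.println(groups); // e.g. {a=[amy, ann], b=[ben, bob]}
    }
}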
From source file: energy.usef.dso.service.business.DsoPlanboardBusinessService.java
/**
 * Creates the flex offers map required to place flex orders.
 *
 * @return Map: congestion point entity address -> Map: PTU date -> flex offer list
 */
public Map<String, Map<LocalDate, List<PlanboardMessage>>> findOrderableFlexOffers() {
    List<PlanboardMessage> flexOffers = planboardMessageRepository.findOrderableFlexOffers();

    // First level: group the valid offers by the USEF identifier of their connection group.
    Map<String, List<PlanboardMessage>> flexOffersPerConnectionGroup = flexOffers.stream()
            .filter(this::validateOffer)
            .collect(Collectors.groupingBy(pm -> pm.getConnectionGroup().getUsefIdentifier()));

    // Second level: within each connection group, group the offers by period.
    return flexOffersPerConnectionGroup.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey,
                    entry -> entry.getValue().stream()
                            .collect(Collectors.groupingBy(PlanboardMessage::getPeriod))));
}
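The two-pass construction above (groupingBy, then toMap over the entry set) can be done in a single collect, because groupingBy accepts a downstream collector, here another groupingBy. A self-contained sketch with plain strings standing in for the offer type:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class NestedGroupingDemo {
    public static void main(String[] args) {
        List<String> offers = List.of("cp1|2024-01-01", "cp1|2024-01-01", "cp1|2024-01-02", "cp2|2024-01-01");

        // Outer groupingBy keys on the congestion point; the inner groupingBy is its
        // downstream collector and keys each group's offers on the period string.
        Map<String, Map<String, List<String>>> byPointAndPeriod = offers.stream()
                .collect(Collectors.groupingBy(o -> o.split("\\|")[0],
                        Collectors.groupingBy(o -> o.split("\\|")[1])));

        System.out.println(byPointAndPeriod.get("cp1").get("2024-01-01").size()); // 2
    }
}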
From source file: com.netflix.spinnaker.clouddriver.titus.client.RegionScopedTitusClient.java
private Map<String, List<com.netflix.titus.grpc.protogen.Task>> getTasks(List<String> jobIds,
        boolean includeDoneJobs) {
    TaskQuery.Builder taskQueryBuilder = TaskQuery.newBuilder();
    if (!jobIds.isEmpty()) {
        taskQueryBuilder.putFilteringCriteria("jobIds", jobIds.stream().collect(Collectors.joining(",")));
    }
    if (titusRegion.getFeatureFlags().contains("jobIds")) {
        taskQueryBuilder.putFilteringCriteria("attributes", "source:spinnaker");
    }
    String filterByStates = "Launched,StartInitiated,Started";
    if (includeDoneJobs) {
        filterByStates = filterByStates + ",KillInitiated,Finished";
    }
    taskQueryBuilder.putFilteringCriteria("taskStates", filterByStates);

    List<com.netflix.titus.grpc.protogen.Task> tasks = getTasksWithFilter(taskQueryBuilder);

    // Index the flat task list by job id so callers can look up all tasks for a given job.
    return tasks.stream()
            .collect(Collectors.groupingBy(com.netflix.titus.grpc.protogen.Task::getJobId));
}
From source file: com.ikanow.aleph2.data_import_manager.analytics.utils.TestAnalyticTriggerCrudUtils.java
@Test
public void test_activateUpdateTimesAndSuspend() throws InterruptedException {
    assertEquals(0, _test_crud.countObjects().join().intValue());

    final DataBucketBean bucket = buildBucket("/test/active/trigger", true);

    // 1) Store as above
    {
        final Stream<AnalyticTriggerStateBean> test_stream = AnalyticTriggerBeanUtils
                .generateTriggerStateStream(bucket, false, Optional.of("test_host"));
        final List<AnalyticTriggerStateBean> test_list = test_stream.collect(Collectors.toList());

        System.out.println("Resources = \n" + test_list.stream()
                .map(t -> BeanTemplateUtils.toJson(t).toString()).collect(Collectors.joining("\n")));

        assertEquals(8L, test_list.size()); // (8 not 7 because we only dedup at the DB)

        // 4 internal dependencies
        assertEquals(4L, test_list.stream().filter(t -> null != t.job_name()).count());
        // 4 external dependencies
        assertEquals(4L, test_list.stream().filter(t -> null == t.job_name()).count());

        // Group the trigger states by bucket name so they can be stored per bucket.
        final Map<Tuple2<String, String>, List<AnalyticTriggerStateBean>> grouped_triggers =
                test_list.stream()
                        .collect(Collectors.groupingBy(t -> Tuples._2T(t.bucket_name(), null)));

        AnalyticTriggerCrudUtils.storeOrUpdateTriggerStage(bucket, _test_crud, grouped_triggers).join();

        assertEquals(7L, _test_crud.countObjects().join().intValue());
    }

    //DEBUG
    //printTriggerDatabase();

    // Sleep to change times
    Thread.sleep(100L);

    //(activate)
    bucket.analytic_thread().jobs().stream()
            .filter(job -> Optional.ofNullable(job.enabled()).orElse(true))
            .forEach(job -> {
                AnalyticTriggerCrudUtils.createActiveBucketOrJobRecord(_test_crud, bucket,
                        Optional.of(job), Optional.of("test_host"));
            });

    // 2) Activate then save suspended - check suspended goes to pending
    {
        final Stream<AnalyticTriggerStateBean> test_stream = AnalyticTriggerBeanUtils
                .generateTriggerStateStream(bucket, true, Optional.of("test_host"));
        final List<AnalyticTriggerStateBean> test_list = test_stream.collect(Collectors.toList());

        assertTrue("All suspended",
                test_list.stream().anyMatch(t -> t.is_bucket_suspended()));

        final Map<Tuple2<String, String>, List<AnalyticTriggerStateBean>> grouped_triggers =
                test_list.stream()
                        .collect(Collectors.groupingBy(t -> Tuples._2T(t.bucket_name(), null)));

        //DEBUG
        //printTriggerDatabase();

        // ie 5 active jobs + 1 active bucket, 4 job dependencies, 3 external triggers
        assertEquals(13L, _test_crud.countObjects().join().intValue());

        AnalyticTriggerCrudUtils.storeOrUpdateTriggerStage(bucket, _test_crud, grouped_triggers).join();

        //DEBUG
        //printTriggerDatabase();

        // ie 5 active jobs + 1 active bucket, 4 job dependencies x2 (pending/non-pending),
        // the 3 external triggers get overwritten
        assertEquals(17L, _test_crud.countObjects().join().intValue());

        assertEquals(7L, _test_crud.countObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class)
                .when(AnalyticTriggerStateBean::is_bucket_suspended, true)).join().intValue());
    }

    // Sleep to change times
    Thread.sleep(100L);

    // 3) De-activate and check reverts to pending
    {
        AnalyticTriggerCrudUtils.deleteActiveJobEntries(_test_crud, bucket,
                bucket.analytic_thread().jobs(), Optional.of("test_host")).join();

        bucket.analytic_thread().jobs().stream().forEach(job -> {
            //System.out.println("BEFORE: " + job.name() + ": " + _test_crud.countObjects().join().intValue());
            AnalyticTriggerCrudUtils
                    .updateCompletedJob(_test_crud, bucket.full_name(), job.name(), Optional.of("test_host"))
                    .join();
            //System.out.println(" AFTER: " + job.name() + ": " + _test_crud.countObjects().join().intValue());
        });

        assertEquals(8L, _test_crud.countObjects().join().intValue());
        assertEquals(7L, _test_crud.countObjectsBySpec(CrudUtils.allOf(AnalyticTriggerStateBean.class)
                .when(AnalyticTriggerStateBean::is_pending, false)).join().intValue());

        AnalyticTriggerCrudUtils
                .deleteActiveBucketRecord(_test_crud, bucket.full_name(), Optional.of("test_host")).join();

        assertEquals(7L, _test_crud.countObjects().join().intValue());
    }
}
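The test above groups by a composite key (Tuples._2T), which works because the tuple type defines equals and hashCode; with plain JDK types, an immutable Map.entry (Java 9+) fills the same role. A self-contained sketch with hypothetical level/component log rows:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class CompositeKeyGroupingDemo {
    public static void main(String[] args) {
        List<String[]> logRows = List.of(
                new String[] { "WARN", "db", "timeout" },
                new String[] { "WARN", "db", "retry" },
                new String[] { "INFO", "web", "request" });

        // Map.entry gives an immutable key with value-based equals/hashCode,
        // so rows sharing (level, component) land in the same group.
        Map<Map.Entry<String, String>, List<String[]>> byLevelAndComponent = logRows.stream()
                .collect(Collectors.groupingBy(r -> Map.entry(r[0], r[1])));

        System.out.println(byLevelAndComponent.size()); // 2
    }
}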
From source file: com.ggvaidya.scinames.summary.NameStabilityView.java
public void init() {
    Project project = projectView.getProject();

    // What do we actually need to do?
    boolean flag_calculateNameSimilarity = (toCalculate & NAME_SIMILARITY) == NAME_SIMILARITY;
    boolean flag_calculateClusterSimilarity = (toCalculate & CLUSTER_SIMILARITY) == CLUSTER_SIMILARITY;
    boolean flag_calculateCircumscriptionSimilarity = (toCalculate & CIRCUMSCRIPTIONAL_SIMILARITY) == CIRCUMSCRIPTIONAL_SIMILARITY;

    // Setup stage.
    stage.setTitle("Name stability between " + project.getDatasets().size() + " datasets");

    // Setup table.
    controller.getTableEditableProperty().set(false);
    //controller.setTableColumnResizeProperty(TableView.CONSTRAINED_RESIZE_POLICY);
    ObservableList<TableColumn> cols = controller.getTableColumnsProperty();
    cols.clear();

    // Precalculating.
    Table<Dataset, String, String> precalc = HashBasedTable.create();

    // Set up columns.
    cols.add(createTableColumnFromPrecalc(precalc, "dataset"));
    cols.add(createTableColumnFromPrecalc(precalc, "date"));
    cols.add(createTableColumnFromPrecalc(precalc, "year"));
    cols.add(createTableColumnFromPrecalc(precalc, "count_binomial"));
    cols.add(createTableColumnFromPrecalc(precalc, "count_genera"));
    cols.add(createTableColumnFromPrecalc(precalc, "count_monotypic_genera"));
    cols.add(createTableColumnFromPrecalc(precalc, "names_added"));
    //cols.add(createTableColumnFromPrecalc(precalc, "names_added_list"));
    cols.add(createTableColumnFromPrecalc(precalc, "names_deleted"));
    //cols.add(createTableColumnFromPrecalc(precalc, "names_deleted_list"));
    cols.add(createTableColumnFromPrecalc(precalc, "species_added"));
    //cols.add(createTableColumnFromPrecalc(precalc, "species_added_list"));
    cols.add(createTableColumnFromPrecalc(precalc, "species_deleted"));
    //cols.add(createTableColumnFromPrecalc(precalc, "species_deleted_list"));
    cols.add(createTableColumnFromPrecalc(precalc, "mean_binomials_per_genera"));
    cols.add(createTableColumnFromPrecalc(precalc, "median_binomials_per_genera"));
    cols.add(createTableColumnFromPrecalc(precalc, "mode_binomials_per_genera_list"));

    /* All them stability calculations */
    if (flag_calculateNameSimilarity) {
        for (String colName : new String[] {
                "names_identical_to_prev", "names_identical_to_prev_pc_this",
                "names_identical_to_prev_pc_union", "names_identical_to_prev_pc_prev",
                "names_identical_to_next", "names_identical_to_next_pc_this",
                "names_identical_to_next_pc_union", "names_identical_to_next_pc_next",
                "names_identical_to_first", "names_identical_to_first_pc_this",
                "names_identical_to_first_pc_union", "names_identical_to_first_pc_first",
                "names_identical_to_last", "names_identical_to_last_pc_this",
                "names_identical_to_last_pc_union", "names_identical_to_last_pc_last" }) {
            cols.add(createTableColumnFromPrecalc(precalc, colName));
        }
    }

    if (flag_calculateClusterSimilarity) {
        for (String colName : new String[] {
                "clusters_identical_to_prev", "clusters_identical_to_prev_pc_this",
                "clusters_identical_to_prev_pc_union", "clusters_identical_to_prev_pc_prev",
                "clusters_identical_to_next", "clusters_identical_to_next_pc_this",
                "clusters_identical_to_next_pc_union", "clusters_identical_to_next_pc_next",
                "clusters_identical_to_first", "clusters_identical_to_first_pc_this",
                "clusters_identical_to_first_pc_union", "clusters_identical_to_first_pc_first",
                "clusters_identical_to_last", "clusters_identical_to_last_pc_this",
                "clusters_identical_to_last_pc_union", "clusters_identical_to_last_pc_last" }) {
            cols.add(createTableColumnFromPrecalc(precalc, colName));
        }
    }

    if (flag_calculateCircumscriptionSimilarity) {
        for (String colName : new String[] {
                "circumscriptions_identical_to_prev", "circumscriptions_identical_to_prev_pc_this",
                "circumscriptions_identical_to_prev_pc_union", "circumscriptions_identical_to_prev_pc_prev",
                "circumscriptions_identical_to_next", "circumscriptions_identical_to_next_pc_this",
                "circumscriptions_identical_to_next_pc_union", "circumscriptions_identical_to_next_pc_next",
                "circumscriptions_identical_to_first", "circumscriptions_identical_to_first_pc_this",
                "circumscriptions_identical_to_first_pc_union", "circumscriptions_identical_to_first_pc_first",
                "circumscriptions_identical_to_last", "circumscriptions_identical_to_last_pc_this",
                "circumscriptions_identical_to_last_pc_union", "circumscriptions_identical_to_last_pc_last" }) {
            cols.add(createTableColumnFromPrecalc(precalc, colName));
        }
    }

    Set<String> recognitionColumns = new HashSet<>();

    // Calculate binomials per dataset.
    Map<Name, Set<Dataset>> datasetsPerName = new HashMap<>();

    // Prepare to loop!
    List<Dataset> checklists = project.getChecklists();

    // BIRD HACK! Include all datasets!
    // checklists = project.getDatasets();

    // Set table items. We're only interested in checklists, because
    // there's no such thing as "name stability" between non-checklist datasets.
    controller.getTableItemsProperty().set(FXCollections.observableArrayList(checklists));

    List<Dataset> prevChecklists = new LinkedList<>();
    Dataset firstChecklist = checklists.get(0);
    Dataset lastChecklist = checklists.get(checklists.size() - 1);

    // TODO: This used to be prevDataset, but prevChecklist makes a lot more sense, since we
    // want to compare checklists with each other, ignoring datasets. Would be nice if someone
    // with copious free time could look over the calculations and make sure they don't assume
    // that the previous checklist is also the previous dataset?
    Dataset prevChecklist = null;

    int index = -1;
    for (Dataset ds : checklists) {
        index++;

        Dataset nextChecklist = (index < (checklists.size() - 1) ? checklists.get(index + 1) : null);

        precalc.put(ds, "dataset", ds.getName());
        precalc.put(ds, "date", ds.getDate().asYYYYmmDD("-"));
        precalc.put(ds, "year", ds.getDate().getYearAsString());

        Set<Name> recognizedBinomials = project.getRecognizedNames(ds).stream()
                .flatMap(n -> n.asBinomial()).collect(Collectors.toSet());
        precalc.put(ds, "count_binomial", String.valueOf(recognizedBinomials.size()));

        Set<Name> recognizedGenera = recognizedBinomials.stream()
                .flatMap(n -> n.asGenus()).collect(Collectors.toSet());
        precalc.put(ds, "count_genera", String.valueOf(recognizedGenera.size()));

        precalc.put(ds, "mean_binomials_per_genera",
                new BigDecimal(((double) recognizedBinomials.size()) / recognizedGenera.size())
                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());

        Map<Name, List<Name>> countBinomialsPerGenus = recognizedBinomials.stream()
                // Eliminate names that have zero (or more than one?!) genus name.
                .filter(n -> (n.asGenus().count() == 1))
                .collect(Collectors.groupingBy(n -> n.asGenus().findAny().get()));

        /*
        LOGGER.info("Debugging: list of " + recognizedGenera.size() + " genera: "
                + recognizedGenera.stream().map(n -> n.getFullName()).collect(Collectors.joining(", ")));
        */

        precalc.put(ds, "count_monotypic_genera",
                String.valueOf(countBinomialsPerGenus.entrySet().stream()
                        .filter(entry -> new HashSet<>(entry.getValue()).size() == 1).count()));

        /*
        LOGGER.info("Debugging: list of monotypic genera: "
                + countBinomialsPerGenus.entrySet().stream()
                        .filter(entry -> new HashSet<>(entry.getValue()).size() == 1)
                        .map(entry -> entry.getKey().getFullName())
                        .collect(Collectors.joining(", ")));
        */

        // Species added and deleted
        Set<Name> namesAdded = ds.getChanges(project)
                .filter(ch -> ch.getType().equals(ChangeType.ADDITION))
                .flatMap(ch -> ch.getToStream()).collect(Collectors.toSet());
        Set<Name> namesDeleted = ds.getChanges(project)
                .filter(ch -> ch.getType().equals(ChangeType.DELETION))
                .flatMap(ch -> ch.getFromStream()).collect(Collectors.toSet());

        // TODO: This isn't so useful -- the more useful measure would be the number of all
        // species added and all species deleted, making sure there isn't a cluster-al overlap.
        precalc.put(ds, "names_added", String.valueOf(namesAdded.size()));
        //precalc.put(ds, "names_added_list", namesAdded.stream().sorted().map(n -> n.getFullName()).collect(Collectors.joining(", ")));
        precalc.put(ds, "names_deleted", String.valueOf(namesDeleted.size()));
        //precalc.put(ds, "names_deleted_list", namesDeleted.stream().sorted().map(n -> n.getFullName()).collect(Collectors.joining(", ")));

        // Eliminate names that have been added, but were previously recognized at the species level.
        Set<Name> speciesAdded = namesAdded;
        if (prevChecklist != null) {
            Set<Name> prevRecognizedNames = project.getNameClusterManager()
                    .getClusters(project.getRecognizedNames(prevChecklist)).stream()
                    .flatMap(nc -> nc.getNames().stream()).collect(Collectors.toSet());
            speciesAdded = namesAdded.stream()
                    .filter(n -> !prevRecognizedNames.contains(n)).collect(Collectors.toSet());
        }

        // Eliminate names that are still represented in the checklist by a species cluster.
        // (Note that this includes cases where a subspecies is removed, but another subspecies
        // or the nominal species is still recognized!)
        Set<Name> currentlyRecognizedBinomialNames = project.getNameClusterManager()
                .getClusters(project.getRecognizedNames(ds)).stream()
                .flatMap(nc -> nc.getNames().stream())
                .flatMap(n -> n.asBinomial()).collect(Collectors.toSet());
        Set<Name> speciesDeleted = namesDeleted.stream()
                .filter(n -> !n.asBinomial().anyMatch(bn -> currentlyRecognizedBinomialNames.contains(bn)))
                .collect(Collectors.toSet());

        precalc.put(ds, "species_added", String.valueOf(speciesAdded.size()));
        precalc.put(ds, "species_added_list", speciesAdded.stream().sorted()
                .map(n -> n.getFullName()).collect(Collectors.joining(", ")));
        precalc.put(ds, "species_deleted", String.valueOf(speciesDeleted.size()));
        precalc.put(ds, "species_deleted_list", speciesDeleted.stream().sorted()
                .map(n -> n.getFullName()).collect(Collectors.joining(", ")));

        // Measures of species per genera
        java.util.Map<String, Set<Name>> binomialsPerGenera = recognizedBinomials.stream()
                .collect(Collectors.toMap(n -> n.getGenus(), n -> {
                    Set<Name> set = new HashSet<Name>();
                    set.add(n);
                    return set;
                }, (a, b) -> {
                    a.addAll(b);
                    return a;
                }));

        List<Integer> binomialsPerGeneraCounts = binomialsPerGenera.values().stream()
                .map(set -> set.size()).sorted().collect(Collectors.toList());

        Frequency freq = new Frequency();
        for (String genus : binomialsPerGenera.keySet()) {
            // Blech.
            for (Name binom : binomialsPerGenera.get(genus)) {
                freq.addValue(genus);
            }
        }
        List<Comparable<?>> modeGenera = freq.getMode();
        precalc.put(ds, "mode_binomials_per_genera_list",
                modeGenera.stream().map(o -> o.toString() + ": " + freq.getCount(o) + " binomials")
                        .collect(Collectors.joining("; ")));

        double[] binomialsPerGeneraCountsAsDouble = binomialsPerGeneraCounts.stream()
                .mapToDouble(Integer::doubleValue).toArray();
        Median median = new Median();
        precalc.put(ds, "median_binomials_per_genera",
                String.valueOf(median.evaluate(binomialsPerGeneraCountsAsDouble)));

        if (firstChecklist == null) {
            // precalc.put(ds, "names_identical_to_first", "NA");
            // precalc.put(ds, "names_identical_to_first_pc", "NA");
        } else {
            if (flag_calculateNameSimilarity) {
                precalc.put(ds, "names_identical_to_first",
                        String.valueOf(getBinomialNamesIntersection(project, ds, firstChecklist).size()));
                precalc.put(ds, "names_identical_to_first_pc_this",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, firstChecklist).size()
                                / recognizedBinomials.size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "names_identical_to_first_pc_union",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, firstChecklist).size()
                                / getBinomialNamesUnion(project, ds, firstChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "names_identical_to_first_pc_first",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, firstChecklist).size()
                                / getBinomialNamesUnion(project, firstChecklist, firstChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
            if (flag_calculateClusterSimilarity) {
                int clustersForDataset = project.getNameClusterManager().getClusters(recognizedBinomials).size();
                if (clustersForDataset != recognizedBinomials.size()) {
                    throw new RuntimeException("We have " + clustersForDataset + " clusters for this dataset, but "
                            + recognizedBinomials.size() + " recognized binomials. What?");
                }
                precalc.put(ds, "clusters_identical_to_first",
                        String.valueOf(getBinomialClustersIntersection(project, ds, firstChecklist).size()));
                precalc.put(ds, "clusters_identical_to_first_pc_this",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, firstChecklist).size()
                                / getBinomialClustersUnion(project, ds, ds).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "clusters_identical_to_first_pc_union",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, firstChecklist).size()
                                / getBinomialClustersUnion(project, ds, firstChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "clusters_identical_to_first_pc_first",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, firstChecklist).size()
                                / getBinomialClustersUnion(project, firstChecklist, firstChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
            if (flag_calculateCircumscriptionSimilarity) {
                precalc.put(ds, "circumscriptions_identical_to_first",
                        String.valueOf(getBinomialTaxonConceptsIntersection(project, ds, firstChecklist).size()));
                precalc.put(ds, "circumscriptions_identical_to_first_pc_this",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, firstChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, ds, ds).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "circumscriptions_identical_to_first_pc_union",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, firstChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, ds, firstChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "circumscriptions_identical_to_first_pc_first",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, firstChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, firstChecklist, firstChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
        }

        if (lastChecklist == null) {
            // precalc.put(ds, "names_identical_to_first", "NA");
            // precalc.put(ds, "names_identical_to_first_pc", "NA");
        } else {
            if (flag_calculateNameSimilarity) {
                precalc.put(ds, "names_identical_to_last",
                        String.valueOf(getBinomialNamesIntersection(project, ds, lastChecklist).size()));
                precalc.put(ds, "names_identical_to_last_pc_this",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, lastChecklist).size()
                                / recognizedBinomials.size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "names_identical_to_last_pc_union",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, lastChecklist).size()
                                / getBinomialNamesUnion(project, ds, lastChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "names_identical_to_last_pc_last",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, lastChecklist).size()
                                / getBinomialNamesUnion(project, lastChecklist, lastChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
            if (flag_calculateClusterSimilarity) {
                int clustersForDataset = project.getNameClusterManager().getClusters(recognizedBinomials).size();
                if (clustersForDataset != recognizedBinomials.size()) {
                    throw new RuntimeException("We have " + clustersForDataset + " clusters for this dataset, but "
                            + recognizedBinomials.size() + " recognized binomials. What?");
                }
                precalc.put(ds, "clusters_identical_to_last",
                        String.valueOf(getBinomialClustersIntersection(project, ds, lastChecklist).size()));
                precalc.put(ds, "clusters_identical_to_last_pc_this",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, lastChecklist).size()
                                / getBinomialClustersUnion(project, ds, ds).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "clusters_identical_to_last_pc_union",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, lastChecklist).size()
                                / getBinomialClustersUnion(project, ds, lastChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "clusters_identical_to_last_pc_last",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, lastChecklist).size()
                                / getBinomialClustersUnion(project, lastChecklist, lastChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
            if (flag_calculateCircumscriptionSimilarity) {
                precalc.put(ds, "circumscriptions_identical_to_last",
                        String.valueOf(getBinomialTaxonConceptsIntersection(project, ds, lastChecklist).size()));
                precalc.put(ds, "circumscriptions_identical_to_last_pc_this",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, lastChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, ds, ds).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "circumscriptions_identical_to_last_pc_union",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, lastChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, ds, lastChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "circumscriptions_identical_to_last_pc_last",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, lastChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, lastChecklist, lastChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
        }

        if (prevChecklist == null) {
            // precalc.put(ds, "names_identical_to_prev", "NA");
            // precalc.put(ds, "names_identical_to_prev_pc", "NA");
        } else {
            if (flag_calculateNameSimilarity) {
                precalc.put(ds, "names_identical_to_prev",
                        String.valueOf(getBinomialNamesIntersection(project, ds, prevChecklist).size()));
                precalc.put(ds, "names_identical_to_prev_pc_this",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, prevChecklist).size()
                                / recognizedBinomials.size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "names_identical_to_prev_pc_union",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, prevChecklist).size()
                                / getBinomialNamesUnion(project, ds, prevChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "names_identical_to_prev_pc_prev",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, prevChecklist).size()
                                / getBinomialNamesUnion(project, prevChecklist, prevChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
            if (flag_calculateClusterSimilarity) {
                int clustersForDataset = project.getNameClusterManager().getClusters(recognizedBinomials).size();
                if (clustersForDataset != recognizedBinomials.size()) {
                    throw new RuntimeException("We have " + clustersForDataset + " clusters for this dataset, but "
                            + recognizedBinomials.size() + " recognized binomials. What?");
                }
                precalc.put(ds, "clusters_identical_to_prev",
                        String.valueOf(getBinomialClustersIntersection(project, ds, prevChecklist).size()));
                precalc.put(ds, "clusters_identical_to_prev_pc_this",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, prevChecklist).size()
                                / getBinomialClustersUnion(project, ds, ds).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "clusters_identical_to_prev_pc_union",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, prevChecklist).size()
                                / getBinomialClustersUnion(project, ds, prevChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "clusters_identical_to_prev_pc_prev",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, prevChecklist).size()
                                / getBinomialClustersUnion(project, prevChecklist, prevChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
            if (flag_calculateCircumscriptionSimilarity) {
                precalc.put(ds, "circumscriptions_identical_to_prev",
                        String.valueOf(getBinomialTaxonConceptsIntersection(project, ds, prevChecklist).size()));
                precalc.put(ds, "circumscriptions_identical_to_prev_pc_this",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, prevChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, ds, ds).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "circumscriptions_identical_to_prev_pc_union",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, prevChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, ds, prevChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "circumscriptions_identical_to_prev_pc_prev",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, prevChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, prevChecklist, prevChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
            // FYI, getBinomialTaxonConceptsUnion(project, ds, prevChecklist).size() should always
            // be equal to the number of species in the dataset.
        }

        if (nextChecklist == null) {
            // precalc.put(ds, "names_identical_to_prev", "NA");
            // precalc.put(ds, "names_identical_to_prev_pc", "NA");
        } else {
            if (flag_calculateNameSimilarity) {
                precalc.put(ds, "names_identical_to_next",
                        String.valueOf(getBinomialNamesIntersection(project, ds, nextChecklist).size()));
                precalc.put(ds, "names_identical_to_next_pc_this",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, nextChecklist).size()
                                / recognizedBinomials.size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "names_identical_to_next_pc_union",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, nextChecklist).size()
                                / getBinomialNamesUnion(project, ds, nextChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "names_identical_to_next_pc_next",
                        new BigDecimal((double) getBinomialNamesIntersection(project, ds, nextChecklist).size()
                                / getBinomialNamesUnion(project, nextChecklist, nextChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
            if (flag_calculateClusterSimilarity) {
                int clustersForDataset = project.getNameClusterManager().getClusters(recognizedBinomials).size();
                if (clustersForDataset != recognizedBinomials.size()) {
                    throw new RuntimeException("We have " + clustersForDataset + " clusters for this dataset, but "
                            + recognizedBinomials.size() + " recognized binomials. What?");
                }
                precalc.put(ds, "clusters_identical_to_next",
                        String.valueOf(getBinomialClustersIntersection(project, ds, nextChecklist).size()));
                precalc.put(ds, "clusters_identical_to_next_pc_this",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, nextChecklist).size()
                                / getBinomialClustersUnion(project, ds, ds).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "clusters_identical_to_next_pc_union",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, nextChecklist).size()
                                / getBinomialClustersUnion(project, ds, nextChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "clusters_identical_to_next_pc_next",
                        new BigDecimal((double) getBinomialClustersIntersection(project, ds, nextChecklist).size()
                                / getBinomialClustersUnion(project, nextChecklist, nextChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
            if (flag_calculateCircumscriptionSimilarity) {
                precalc.put(ds, "circumscriptions_identical_to_next",
                        String.valueOf(getBinomialTaxonConceptsIntersection(project, ds, nextChecklist).size()));
                precalc.put(ds, "circumscriptions_identical_to_next_pc_this",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, nextChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, ds, ds).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "circumscriptions_identical_to_next_pc_union",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, nextChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, ds, nextChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
                precalc.put(ds, "circumscriptions_identical_to_next_pc_next",
                        new BigDecimal((double) getBinomialTaxonConceptsIntersection(project, ds, nextChecklist).size()
                                / getBinomialTaxonConceptsUnion(project, nextChecklist, nextChecklist).size() * 100)
                                        .setScale(2, BigDecimal.ROUND_HALF_EVEN).toPlainString());
            }
        }

        /*
        // For the visualization thingie.
        int total = prevChecklists.size();
        List<Integer> counts = new LinkedList<>();
        for (Name name : recognizedBinomials) {
            int prevRecognized = 0;
            if (!datasetsPerName.containsKey(name)) {
                datasetsPerName.put(name, new HashSet<>());
            } else {
                prevRecognized = datasetsPerName.get(name).size();
            }
            datasetsPerName.get(name).add(ds);
            counts.add((int) (((double) prevRecognized) / total * 100));
        }
        Map<Integer, List<Integer>> countsByPercentage = counts.stream().sorted()
                .collect(Collectors.groupingBy(n -> (int) (n / 10) * 10));
        for (int percentage : countsByPercentage.keySet()) {
            precalc.put(ds, "previously_recognized_" + percentage + "pc",
                    String.valueOf(countsByPercentage.get(percentage).size()));
            recognitionColumns.add("previously_recognized_" + percentage + "pc");
        }
        prevChecklists.add(ds);
        */

        // Set up the previous checklist for the next loop.
        prevChecklist = ds;
    }

    /*
    LinkedList<String> recognitionColumnsList = new LinkedList<>(recognitionColumns);
    recognitionColumnsList.sort(null);
    for (String colName : recognitionColumnsList) {
        cols.add(createTableColumnFromPrecalc(precalc, colName));
    }
    */
}
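One aside on the example above: binomialsPerGenera is built with toMap plus a merge function to get a Set-valued multimap, but groupingBy with a toSet() downstream produces the same shape in one idiomatic step. A self-contained sketch (genus taken as the first word of a hypothetical binomial string):

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class BinomialsPerGenusDemo {
    public static void main(String[] args) {
        List<String> binomials = List.of("Parus major", "Parus minor", "Corvus corax");

        // Same shape as the toMap/merge construction: genus -> set of binomials,
        // with duplicates within a genus collapsing automatically.
        Map<String, Set<String>> byGenus = binomials.stream()
                .collect(Collectors.groupingBy(b -> b.split(" ")[0], Collectors.toSet()));

        System.out.println(byGenus); // e.g. {Parus=[Parus major, Parus minor], Corvus=[Corvus corax]}
    }
}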
From source file: alfio.controller.ReservationController.java
@RequestMapping(value = "/event/{eventName}/reservation/{reservationId}/success", method = RequestMethod.GET)
public String showConfirmationPage(@PathVariable("eventName") String eventName,
        @PathVariable("reservationId") String reservationId,
        @RequestParam(value = "confirmation-email-sent", required = false, defaultValue = "false") boolean confirmationEmailSent,
        @RequestParam(value = "ticket-email-sent", required = false, defaultValue = "false") boolean ticketEmailSent,
        Model model, Locale locale, HttpServletRequest request) {

    return eventRepository.findOptionalByShortName(eventName).map(ev -> {
        Optional<TicketReservation> tr = ticketReservationManager.findById(reservationId);
        return tr.filter(r -> r.getStatus() == TicketReservationStatus.COMPLETE).map(reservation -> {
            SessionUtil.removeSpecialPriceData(request);
            model.addAttribute("reservationId", reservationId);
            model.addAttribute("reservation", reservation);
            model.addAttribute("confirmationEmailSent", confirmationEmailSent);
            model.addAttribute("ticketEmailSent", ticketEmailSent);

            List<Ticket> tickets = ticketReservationManager.findTicketsInReservation(reservationId);
            List<Triple<AdditionalService, List<AdditionalServiceText>, AdditionalServiceItem>> additionalServices =
                    ticketReservationManager.findAdditionalServicesInReservation(reservationId).stream()
                            .map(t -> Triple.of(t.getLeft(),
                                    t.getMiddle().stream()
                                            .filter(d -> d.getLocale().equals(locale.getLanguage()))
                                            .collect(toList()),
                                    t.getRight()))
                            .collect(Collectors.toList());
            boolean hasPaidSupplement = ticketReservationManager.hasPaidSupplements(reservationId);

            // Group the reservation's tickets by category id, then decorate each
            // category's tickets for display.
            model.addAttribute("ticketsByCategory", tickets.stream()
                    .collect(Collectors.groupingBy(Ticket::getCategoryId)).entrySet().stream()
                    .map((e) -> {
                        TicketCategory category = eventManager.getTicketCategoryById(e.getKey(), ev.getId());
                        List<TicketDecorator> decorators = TicketDecorator.decorate(e.getValue(),
                                !hasPaidSupplement && configurationManager.getBooleanConfigValue(
                                        Configuration.from(ev.getOrganizationId(), ev.getId(), category.getId(),
                                                ALLOW_FREE_TICKETS_CANCELLATION), false),
                                eventManager.checkTicketCancellationPrerequisites(),
                                ticket -> ticketHelper.findTicketFieldConfigurationAndValue(ev.getId(), ticket, locale),
                                tickets.size() == 1, TicketDecorator.EMPTY_PREFIX_GENERATOR);
                        return Pair.of(category, decorators);
                    }).collect(toList()));

            boolean ticketsAllAssigned = tickets.stream().allMatch(Ticket::getAssigned);
            model.addAttribute("ticketsAreAllAssigned", ticketsAllAssigned);
            model.addAttribute("collapseEnabled", tickets.size() > 1 && !ticketsAllAssigned);
            model.addAttribute("additionalServicesOnly", tickets.isEmpty() && !additionalServices.isEmpty());
            model.addAttribute("additionalServices", additionalServices);
            model.addAttribute("countries", TicketHelper.getLocalizedCountries(locale));
            model.addAttribute("pageTitle", "reservation-page-complete.header.title");
            model.addAttribute("event", ev);
            model.addAttribute("useFirstAndLastName", ev.mustUseFirstAndLastName());
            model.asMap().putIfAbsent("validationResult", ValidationResult.success());
            return "/event/reservation-page-complete";
        }).orElseGet(() -> redirectReservation(tr, eventName, reservationId));
    }).orElse("redirect:/");
}
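groupingBy makes no promise about the Map implementation it returns (in practice a HashMap, with no useful key order). When the grouped result should iterate in key order, e.g. ticket categories by ascending id, the three-argument overload takes an explicit map factory; a minimal self-contained sketch:

import java.util.List;
import java.util.TreeMap;
import java.util.stream.Collectors;

public class SortedGroupingDemo {
    public static void main(String[] args) {
        List<String> words = List.of("pear", "fig", "plum", "apple");

        // classifier, map factory, downstream: TreeMap keeps keys in ascending order.
        TreeMap<Integer, List<String>> byLength = words.stream()
                .collect(Collectors.groupingBy(String::length, TreeMap::new, Collectors.toList()));

        System.out.println(byLength); // {3=[fig], 4=[pear, plum], 5=[apple]}
    }
}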
From source file: net.staticsnow.nexus.repository.apt.internal.hosted.AptHostedFacet.java
private List<Asset> selectOldPackagesToRemove(String packageName, String arch)
        throws IOException, PGPException {
    if (config.assetHistoryLimit == null) {
        return Collections.emptyList();
    }
    int count = config.assetHistoryLimit;

    StorageTx tx = UnitOfWork.currentTx();
    Map<String, Object> sqlParams = new HashMap<>();
    sqlParams.put(P_PACKAGE_NAME, packageName);
    sqlParams.put(P_ARCHITECTURE, arch);
    sqlParams.put(P_ASSET_KIND, "DEB");
    Iterable<Asset> assets = tx.findAssets(ASSETS_BY_PACKAGE_AND_ARCH, sqlParams,
            Collections.singleton(getRepository()), "");

    List<Asset> removals = new ArrayList<>();

    // Group the package's assets by architecture so the history limit is applied per arch.
    Map<String, List<Asset>> assetsByArch = StreamSupport.stream(assets.spliterator(), false)
            .collect(Collectors.groupingBy(a -> a.formatAttributes().get(P_ARCHITECTURE, String.class)));

    for (Map.Entry<String, List<Asset>> entry : assetsByArch.entrySet()) {
        if (entry.getValue().size() <= count) {
            continue;
        }
        int trimCount = entry.getValue().size() - count;
        // Keep the newest `count` versions; everything older is scheduled for removal.
        Set<String> keepVersions = entry.getValue().stream()
                .map(a -> new Version(a.formatAttributes().get(P_PACKAGE_VERSION, String.class)))
                .sorted()
                .skip(trimCount)
                .map(v -> v.toString())
                .collect(Collectors.toSet());
        entry.getValue().stream()
                .filter(a -> !keepVersions.contains(a.formatAttributes().get(P_PACKAGE_VERSION, String.class)))
                .forEach(removals::add);
    }
    return removals;
}
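When only group sizes are needed, e.g. to spot which architectures exceed the history limit before doing anything else, a counting() downstream avoids materializing per-group lists; a self-contained sketch:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupSizesDemo {
    public static void main(String[] args) {
        List<String> archs = List.of("amd64", "i386", "amd64", "amd64");

        // groupingBy + counting() yields group sizes without building per-group lists.
        Map<String, Long> perArch = archs.stream()
                .collect(Collectors.groupingBy(a -> a, Collectors.counting()));

        System.out.println(perArch); // e.g. {amd64=3, i386=1}
    }
}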
From source file: cc.arduino.contributions.packages.ContributionsIndexer.java
public Set<ContributedTool> getInstalledTools() {
    Set<ContributedTool> tools = new HashSet<>();
    if (index == null) {
        return tools;
    }
    for (ContributedPackage pack : index.getPackages()) {
        Collection<ContributedPlatform> platforms = pack.getPlatforms().stream()
                .filter(p -> p.isInstalled())
                .collect(Collectors.toList());

        // Group the installed platforms by name; if a name has several entries,
        // prefer the user-installed ones over the built-in one.
        Map<String, List<ContributedPlatform>> platformsByName = platforms.stream()
                .collect(Collectors.groupingBy(ContributedPlatform::getName));
        platformsByName.forEach((platformName, platformsWithName) -> {
            if (platformsWithName.size() > 1) {
                platformsWithName = platformsWithName.stream()
                        .filter(p -> !p.isBuiltIn())
                        .collect(Collectors.toList());
            }
            for (ContributedPlatform p : platformsWithName) {
                tools.addAll(p.getResolvedTools());
            }
        });
    }
    return tools;
}
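Since Java 9, per-group filtering like the built-in check above can be pushed into the collector itself via Collectors.filtering. Note it is not a drop-in replacement for this example: the original keeps a built-in platform when it is the only entry for its name, while filtering would drop it. A self-contained sketch with strings standing in for platforms:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class FilteringDownstreamDemo {
    public static void main(String[] args) {
        List<String> platforms = List.of("avr:builtin", "avr:user", "samd:builtin");

        // Java 9+ Collectors.filtering: the group for "samd" survives with an
        // empty list, which a pre-grouping filter() would have dropped entirely.
        Map<String, List<String>> nonBuiltInByName = platforms.stream()
                .collect(Collectors.groupingBy(p -> p.split(":")[0],
                        Collectors.filtering(p -> !p.endsWith(":builtin"), Collectors.toList())));

        System.out.println(nonBuiltInByName); // e.g. {avr=[avr:user], samd=[]}
    }
}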