List of usage examples for java.util.Collection.stream()

default Stream<E> stream()

Returns a sequential Stream with this collection as its source. The examples below, taken from several open-source projects, show common uses of the method: map/filter/collect pipelines, Optional handling, building maps and arrays, sorting, and de-duplication.
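Before the project examples, here is a minimal, self-contained sketch of the method itself; the class name and data are made up for illustration:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

public class CollectionStreamExample {
    public static void main(String[] args) {
        // Any Collection exposes stream(); here a List is used as the source.
        Collection<String> names = Arrays.asList("alice", "bob", "carol");

        // Build a pipeline: filter, transform, and collect back into a List.
        List<String> shortUpper = names.stream()
                .filter(n -> n.length() <= 5)
                .map(String::toUpperCase)
                .collect(Collectors.toList());

        System.out.println(shortUpper); // [ALICE, BOB, CAROL]
    }
}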
From source file: org.springframework.cloud.deployer.spi.mesos.chronos.ChronosTaskLauncher.java

@Override
public String launch(AppDeploymentRequest request) {
    String jobName = createDeploymentId(request);
    String image = null;
    try {
        image = request.getResource().getURI().getSchemeSpecificPart();
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to get URI for " + request.getResource(), e);
    }
    logger.info("Using Docker image: " + image);
    DockerJob job = new DockerJob();
    job.setName(jobName);
    List<Map<String, String>> envVars = new ArrayList<>();
    Map<String, String> springApplicationJson = createSpringApplicationJson(request);
    if (springApplicationJson.size() > 0) {
        envVars.add(springApplicationJson);
    }
    logger.info("Using env: " + envVars);
    if (envVars.size() > 0) {
        job.setEnvironmentVariables(envVars);
    }
    job.setShell(false);
    job.setCommand("");
    List<String> args = createCommandArgs(request);
    if (args.size() > 0) {
        job.setArguments(args);
    }
    job.setSchedule("R1//P");
    job.setRetries(properties.getRetries());
    DockerContainer container = new DockerContainer();
    container.setImage(image);
    job.setContainer(container);
    Double cpus = deduceCpus(request);
    Double memory = deduceMemory(request);
    Collection<Constraint> constraints = deduceConstraints(request);
    job.setCpus(cpus);
    job.setMem(memory);
    // Collection.stream(): map each Constraint to its list form and collect into a List.
    job.setConstraints(constraints.stream().map(Constraint::toStringList).collect(Collectors.toList()));
    if (StringUtils.hasText(properties.getOwnerEmail())) {
        job.setOwner(properties.getOwnerEmail());
    }
    if (StringUtils.hasText(properties.getOwnerName())) {
        job.setOwnerName(properties.getOwnerName());
    }
    if (properties.getUris() != null && properties.getUris().length > 0) {
        job.setUris(Arrays.asList(properties.getUris()));
    }
    try {
        if (logger.isDebugEnabled()) {
            logger.debug("Launching Job with definition:\n" + job.toString());
        }
        chronos.createDockerJob(job);
    } catch (ChronosException e) {
        logger.error(e.getMessage(), e);
        throw new IllegalStateException(String.format("Error while creating job '%s'", jobName), e);
    }
    return jobName;
}
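The Collection.stream() call of interest is the constraint mapping passed to job.setConstraints. Below is a stripped-down sketch of that pattern; the Constraint class and its toStringList method are simplified stand-ins, not the real Spring Cloud Deployer types:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

public class ConstraintMappingSketch {
    // Stand-in for the deployer's Constraint type.
    static class Constraint {
        private final String attribute;
        private final String operator;
        private final String value;

        Constraint(String attribute, String operator, String value) {
            this.attribute = attribute;
            this.operator = operator;
            this.value = value;
        }

        // Mirrors the toStringList() call used by the launcher above.
        List<String> toStringList() {
            return Arrays.asList(attribute, operator, value);
        }
    }

    public static void main(String[] args) {
        Collection<Constraint> constraints = Arrays.asList(
                new Constraint("hostname", "UNIQUE", ""),
                new Constraint("rack", "LIKE", "rack-1"));

        // Same shape as the launcher: map each constraint, collect into a List of lists.
        List<List<String>> asLists = constraints.stream()
                .map(Constraint::toStringList)
                .collect(Collectors.toList());

        System.out.println(asLists); // [[hostname, UNIQUE, ], [rack, LIKE, rack-1]]
    }
}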
From source file: com.romeikat.datamessie.core.base.dao.impl.SourceDao.java

public Map<Document, Source> getForDocuments(final SharedSessionContract ssc,
        final Collection<Document> documents) {
    final Set<Long> sourceIds = documents.stream()
            .map(d -> d.getSourceId())
            .collect(Collectors.toSet());
    final Map<Long, Source> sourcesById = getIdsWithEntities(ssc, sourceIds);
    final Map<Document, Source> result = Maps.newHashMapWithExpectedSize(documents.size());
    for (final Document document : documents) {
        final Source source = sourcesById.get(document.getSourceId());
        result.put(document, source);
    }
    return result;
}
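The idiom here is extracting a de-duplicated set of foreign-key IDs from a collection of entities before doing one bulk lookup. A self-contained sketch of that step (the Document record is a placeholder, not the DataMessie entity):

import java.util.Arrays;
import java.util.Collection;
import java.util.Set;
import java.util.stream.Collectors;

public class SourceIdExtractionSketch {
    // Placeholder entity with a foreign-key style sourceId field.
    record Document(long id, long sourceId) {}

    public static void main(String[] args) {
        Collection<Document> documents = Arrays.asList(
                new Document(1, 100), new Document(2, 100), new Document(3, 200));

        // Collecting into a Set de-duplicates the IDs automatically.
        Set<Long> sourceIds = documents.stream()
                .map(Document::sourceId)
                .collect(Collectors.toSet());

        System.out.println(sourceIds); // e.g. [100, 200]
    }
}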
From source file: natalia.dymnikova.cluster.scheduler.impl.NodeSearcher.java

public CompletableFuture<List<Optional<Address>>> search(final Remote operator) {
    final Timeout timeout = new Timeout(Duration.create(5, TimeUnit.SECONDS));
    final CheckFlow msg = CheckFlow.newBuilder().setOperator(wrap(codec.packObject(operator))).build();

    final Collection<Member> members = JavaConversions.asJavaCollection(cluster.readView().members());
    if (members.isEmpty()) {
        return immediateFailedFuture(new IllegalStateException("No cluster members found"));
    }

    // Collection.stream(): filter the cluster members, ask each candidate actor,
    // and gather the resulting futures into an array for allOf().
    @SuppressWarnings("unchecked")
    final CompletableFuture<Optional<Address>>[] futures = members.stream()
            .filter(member -> member.hasRole("compute"))
            .filter(member -> checker.check(operator, member.getRoles()))
            .map(m -> computePool(m.address()))
            .map(actorPath -> {
                log.debug("Message: {} to {}", lazyDebugString(msg), actorPath);
                return toJava(adapter.ask(adapter.actorSelection(actorPath), msg, timeout))
                        .thenApply(o -> {
                            log.debug("actor: {} answer: {}", actorPath, o.getClass().getName());
                            if (o instanceof Flow.State.Ok) {
                                return Optional.of(actorPath.address());
                            } else {
                                return Optional.empty();
                            }
                        })
                        .exceptionally(t -> {
                            log.error("Exception happened when sending {} to {}", lazyDebugString(msg), actorPath, t);
                            return Optional.empty();
                        });
            })
            .toArray(CompletableFuture[]::new);

    return allOf(futures);
}
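The notable idiom is fanning a stream of members out into an array of CompletableFutures and joining them with allOf. A simplified, self-contained sketch of that shape; the probe method below stands in for the Akka ask and is not part of the real NodeSearcher:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class FanOutSketch {
    // Stand-in for the remote "ask": succeeds only for nodes whose name starts with "compute".
    static CompletableFuture<Optional<String>> probe(String node) {
        return CompletableFuture.supplyAsync(
                () -> node.startsWith("compute") ? Optional.of(node) : Optional.<String>empty());
    }

    public static void main(String[] args) {
        Collection<String> members = Arrays.asList("compute-1", "web-1", "compute-2");

        // Stream the members, start one asynchronous probe per member,
        // and collect the futures into an array so allOf can join them.
        @SuppressWarnings("unchecked")
        CompletableFuture<Optional<String>>[] futures = members.stream()
                .map(FanOutSketch::probe)
                .toArray(CompletableFuture[]::new);

        // Wait for all probes, then read each result.
        CompletableFuture.allOf(futures).join();
        List<Optional<String>> results = Arrays.stream(futures)
                .map(CompletableFuture::join)
                .collect(Collectors.toList());

        System.out.println(results); // [Optional[compute-1], Optional.empty, Optional[compute-2]]
    }
}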
From source file: com.samsung.sjs.theorysolver.TheorySolverTest.java

/**
 * This tests the {@link TheorySolver} using a theory which has a random set of
 * blacklisted objects. We verify that the TheorySolver always finds the entire
 * set of non-blacklisted objects.
 */
@Test
public void testBasics() {
    List<Object> all = Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h");
    for (int i = 0; i < 100; ++i) {
        Random r = new Random(SEED + i);
        Collection<Object> truthy = all.stream().filter(x -> r.nextBoolean()).collect(Collectors.toSet());
        Theory<Object, Void> theory = positive -> {
            Collection<Object> bad = positive.stream()
                    .filter(x -> !truthy.contains(x))
                    .collect(Collectors.toSet());
            if (bad.size() > 0) {
                // Construct a random, nonempty unsat core.
                Collection<Object> unsat = new HashSet<>();
                unsat.add(bad.iterator().next());
                bad.stream().filter(x -> r.nextBoolean()).forEach(unsat::add);
                return Either.right(unsat);
            } else {
                return Either.left(null);
            }
        };
        Pair<Void, Collection<Object>> result = TheorySolver.solve(theory,
                new SatFixingSetFinder<>(new Sat4J()), Collections.emptyList(), all);
        Assert.assertEquals(all.size() - truthy.size(), result.getRight().size());
        Assert.assertEquals(truthy,
                all.stream().filter(x -> !result.getRight().contains(x)).collect(Collectors.toSet()));
    }
}
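The stream usage to note is computing a set difference by filtering one collection against membership in another (the "bad" set is every positive element not in truthy). A minimal sketch of that check, independent of the SAT machinery; the names below are illustrative only:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class SetDifferenceSketch {
    public static void main(String[] args) {
        List<String> all = Arrays.asList("a", "b", "c", "d", "e");
        Set<String> allowed = Set.of("a", "c", "e");

        // Elements of 'all' that are NOT in 'allowed' -- the "bad" set in the test above.
        Collection<String> rejected = all.stream()
                .filter(x -> !allowed.contains(x))
                .collect(Collectors.toSet());

        System.out.println(rejected); // [b, d] in some order
    }
}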
From source file: com.teradata.benchto.driver.loader.BenchmarkLoader.java

private void printFormattedBenchmarksInfo(String formatString, Collection<Benchmark> benchmarks) {
    LOGGER.info(format(formatString, "Benchmark Name", "Data Source", "Runs", "Prewarms", "Concurrency"));
    benchmarks.stream()
            .map(benchmark -> format(formatString, benchmark.getName(), benchmark.getDataSource(),
                    benchmark.getRuns() + "", benchmark.getPrewarmRuns() + "", benchmark.getConcurrency() + ""))
            .distinct()
            .forEach(LOGGER::info);
}
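This example chains map, distinct, and forEach so each unique formatted row is logged exactly once. A standalone sketch of the same pipeline (the Benchmark record is a simplified stand-in for the Benchto class):

import java.util.Arrays;
import java.util.Collection;

public class DistinctRowsSketch {
    record Benchmark(String name, String dataSource, int runs) {}

    public static void main(String[] args) {
        Collection<Benchmark> benchmarks = Arrays.asList(
                new Benchmark("tpch-q1", "hive", 3),
                new Benchmark("tpch-q1", "hive", 3),   // duplicate definition
                new Benchmark("tpch-q6", "hive", 5));

        String formatString = "%-10s | %-6s | %-4s";
        System.out.println(String.format(formatString, "Name", "Source", "Runs"));

        // Format each benchmark into a row, drop duplicate rows, print the rest.
        benchmarks.stream()
                .map(b -> String.format(formatString, b.name(), b.dataSource(), b.runs()))
                .distinct()
                .forEach(System.out::println);
    }
}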
From source file: io.gravitee.management.service.impl.PageServiceImpl.java

@Override
public List<PageListItem> findByApi(String apiId) {
    try {
        final Collection<Page> pages = pageRepository.findByApi(apiId);
        if (pages == null) {
            return emptyList();
        }
        return pages.stream()
                .map(this::reduce)
                .sorted((o1, o2) -> Integer.compare(o1.getOrder(), o2.getOrder()))
                .collect(Collectors.toList());
    } catch (TechnicalException ex) {
        LOGGER.error("An error occurs while trying to get API pages using api ID {}", apiId, ex);
        throw new TechnicalManagementException(
                "An error occurs while trying to get API pages using api ID " + apiId, ex);
    }
}
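The pipeline maps entities to list items and sorts them with a comparator before collecting. A minimal sketch of map + sorted + collect; Page and PageListItem here are made-up records, not the Gravitee classes:

import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public class SortedMappingSketch {
    record Page(String name, int order) {}
    record PageListItem(String name, int order) {}

    public static void main(String[] args) {
        Collection<Page> pages = Arrays.asList(
                new Page("getting-started", 2), new Page("overview", 1), new Page("faq", 3));

        // Map each Page to a lighter list item, then sort by the 'order' field.
        List<PageListItem> items = pages.stream()
                .map(p -> new PageListItem(p.name(), p.order()))
                .sorted(Comparator.comparingInt(PageListItem::order))
                .collect(Collectors.toList());

        items.forEach(i -> System.out.println(i.order() + " " + i.name()));
    }
}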
From source file: at.grahsl.kafka.connect.mongodb.MongoDbSinkTask.java

List<? extends WriteModel<BsonDocument>> buildWriteModelCDC(Collection<SinkRecord> records,
        String collectionName) {
    LOGGER.debug("building CDC write model for {} record(s) into collection {}", records.size(),
            collectionName);
    return records.stream()
            .map(sinkConverter::convert)
            .map(cdcHandlers.get(collectionName)::handle)
            .flatMap(o -> o.map(Stream::of).orElseGet(Stream::empty))
            .collect(Collectors.toList());
}
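The key idiom is flatMap(o -> o.map(Stream::of).orElseGet(Stream::empty)), which drops empty Optionals from the pipeline (on Java 9+ the same thing can be written as .flatMap(Optional::stream)). A self-contained sketch; the handle method below stands in for the CDC handler:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class OptionalFlatMapSketch {
    // Stand-in for the CDC handler: returns an empty Optional for records it skips.
    static Optional<String> handle(String rec) {
        return rec.isEmpty() ? Optional.<String>empty() : Optional.of("write:" + rec);
    }

    public static void main(String[] args) {
        Collection<String> records = Arrays.asList("insert-1", "", "update-2");

        // flatMap turns Optional[x] into a one-element stream and Optional.empty into an
        // empty stream, so skipped records simply vanish from the pipeline.
        List<String> writes = records.stream()
                .map(OptionalFlatMapSketch::handle)
                .flatMap(o -> o.map(Stream::of).orElseGet(Stream::empty))
                .collect(Collectors.toList());

        System.out.println(writes); // [write:insert-1, write:update-2]
    }
}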
From source file: com.openshift.internal.restclient.model.v1.ReplicationControllerTest.java

@Test
public void testEnvironmentVariableForANamedContainer() {
    rc.setEnvironmentVariable("ruby-helloworld-database", "fooz", "balls");
    Collection<IEnvironmentVariable> envVars = rc.getEnvironmentVariables("ruby-helloworld-database");
    Optional<IEnvironmentVariable> envVar = envVars.stream()
            .filter(e -> "fooz".equals(e.getName()))
            .findFirst();
    assertTrue("Exp. to find env var", envVar.isPresent());
    assertEquals("balls", envVar.get().getValue());
}
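filter plus findFirst is the standard way to locate at most one matching element and get an Optional back. A minimal sketch using plain name/value pairs instead of IEnvironmentVariable:

import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.Optional;

public class FindFirstSketch {
    public static void main(String[] args) {
        // Simple name/value pairs standing in for environment variable objects.
        Collection<Map.Entry<String, String>> envVars = Arrays.asList(
                Map.entry("PATH", "/usr/bin"),
                Map.entry("HOME", "/home/app"));

        // filter + findFirst yields an Optional: present if a match exists, empty otherwise.
        Optional<Map.Entry<String, String>> home = envVars.stream()
                .filter(e -> "HOME".equals(e.getKey()))
                .findFirst();

        System.out.println(home.map(Map.Entry::getValue).orElse("<not set>")); // /home/app
    }
}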
From source file: org.fcrepo.apix.binding.impl.RuntimeExtensionBinding.java

@Override
public Collection<Extension> getExtensionsFor(final WebResource resource,
        final Collection<Extension> extensions) {
    try (final InputStream resourceContent = resource.representation()) {
        final byte[] content = IOUtils.toByteArray(resourceContent);

        // First pipeline: flatten each extension's ontology into the set of inferred RDF classes.
        final Set<URI> rdfTypes = extensions.stream()
                .flatMap(RuntimeExtensionBinding::getExtensionResource)
                .peek(r -> LOG.debug("Examinining the ontology closure of extension {}", r.uri()))
                .map(ontologySvc::parseOntology)
                .flatMap(o -> ontologySvc.inferClasses(resource.uri(), cached(resource, content), o).stream())
                .peek(rdfType -> LOG.debug("Instance {} is of class {}", resource.uri(), rdfType))
                .collect(Collectors.toSet());

        // Second pipeline: keep only the extensions whose binding class is among those types.
        return extensions.stream()
                .peek(e -> LOG.debug("Extension {} binds to instances of {}", e.uri(), e.bindingClass()))
                .filter(e -> rdfTypes.contains(e.bindingClass()))
                .peek(e -> LOG.debug("Extension {} bound to instance {} via {}", e.uri(), resource.uri(),
                        e.bindingClass()))
                .collect(Collectors.toList());
    } catch (final IOException e) {
        throw new RuntimeException(e);
    }
}
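Two pipelines cooperate here: the first gathers inferred RDF classes into a Set via flatMap, the second filters the extensions against that Set, and peek is used only for debug logging. A simplified sketch with placeholder types (not the real Fedora API-X classes):

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class ExtensionBindingSketch {
    // Placeholder: an extension is identified by a URI and binds to one RDF class.
    record Extension(String uri, String bindingClass) {}

    public static void main(String[] args) {
        // Classes inferred for the resource (stand-in for the ontology inference step).
        Set<String> rdfTypes = Set.of("ldp:Resource", "ldp:Container");

        Collection<Extension> extensions = Arrays.asList(
                new Extension("ext:versioning", "ldp:Container"),
                new Extension("ext:thumbnails", "pcdm:Object"));

        // peek is only for logging; filter keeps the extensions whose binding class
        // appears among the resource's inferred types.
        List<Extension> bound = extensions.stream()
                .peek(e -> System.out.println(e.uri() + " binds to " + e.bindingClass()))
                .filter(e -> rdfTypes.contains(e.bindingClass()))
                .collect(Collectors.toList());

        System.out.println("bound: " + bound); // only ext:versioning matches
    }
}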
From source file: org.zalando.riptide.Router.java

final <A> Capture route(final ClientHttpResponse response, final List<HttpMessageConverter<?>> converters,
        final Selector<A> selector, final Collection<Binding<A>> bindings) {
    final Optional<A> attribute;

    try {
        attribute = selector.attributeOf(response);
    } catch (final IOException e) {
        throw new RestClientException("Unable to retrieve attribute of response", e);
    }

    // Collection.stream(): index the bindings by attribute, rejecting duplicates and
    // preserving insertion order via LinkedHashMap.
    final Map<Optional<A>, Binding<A>> index = bindings.stream()
            .collect(toMap(Binding::getAttribute, identity(), this::denyDuplicates, LinkedHashMap::new));

    final Optional<Binding<A>> match = selector.select(attribute, index);

    try {
        if (match.isPresent()) {
            final Binding<A> binding = match.get();
            try {
                return binding.execute(response, converters);
            } catch (final NoRouteException e) {
                return propagateNoMatch(response, converters, index, e);
            }
        } else {
            return routeNone(response, converters, index);
        }
    } catch (final IOException e) {
        throw new RestClientException("Unable to execute binding", e);
    }
}
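The notable piece is the four-argument Collectors.toMap: a key mapper, a value mapper, a merge function for duplicate keys, and a map factory (LinkedHashMap, so the bindings keep their declaration order). A standalone sketch of that collector; the Binding record and the throwing merge function are illustrative, presumably mirroring what denyDuplicates does in the Router:

import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class ToMapSketch {
    record Binding(String attribute, String handler) {}

    public static void main(String[] args) {
        Collection<Binding> bindings = Arrays.asList(
                new Binding("200", "onSuccess"),
                new Binding("404", "onNotFound"),
                new Binding("200", "onSuccessAgain")); // duplicate key on purpose

        // Key: the attribute. Value: the binding itself. Duplicate keys are rejected loudly,
        // and LinkedHashMap::new keeps the entries in encounter order.
        try {
            Map<String, Binding> index = bindings.stream()
                    .collect(Collectors.toMap(
                            Binding::attribute,
                            Function.identity(),
                            (a, b) -> { throw new IllegalStateException("Duplicate attribute: " + a.attribute()); },
                            LinkedHashMap::new));
            System.out.println(index);
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // Duplicate attribute: 200
        }
    }
}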