List of usage examples for java.util.stream.Collectors.toSet()
public static <T> Collector<T, ?, Set<T>> toSet()
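Before the source-file examples, a minimal self-contained sketch of the collector itself (the names list and its values are illustrative, not taken from any project below): toSet() accumulates stream elements into a Set, collapsing duplicates via equals(); note that the Javadoc makes no guarantee about the type, mutability, serializability, or thread-safety of the returned Set.

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class ToSetBasics {
    public static void main(String[] args) {
        List<String> names = List.of("alice", "bob", "alice"); // List.of requires Java 9+

        // Duplicates collapse into one element; the concrete Set type and
        // its iteration order are unspecified by Collectors.toSet().
        Set<String> unique = names.stream().collect(Collectors.toSet());

        System.out.println(unique.size()); // prints 2
    }
}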
From source file:de.flashpixx.rrd_antlr4.generator.IBaseGenerator.java
@Override
public final IGenerator generate(final File p_grammar) {
    final File l_outputdirectory = this.processoutputdirectory(p_grammar);
    try {
        return this.processmessages(p_grammar, l_outputdirectory,
                ENGINE.generate(m_baseoutput, l_outputdirectory, p_grammar, m_docuclean, m_imports, m_templates));
    } catch (final IOException l_exception) {
        return this.processmessages(p_grammar, l_outputdirectory,
                Collections.unmodifiableSet(Stream.of(l_exception.getMessage()).collect(Collectors.toSet())));
    }
}
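This example wraps the collected set in Collections.unmodifiableSet to keep callers from mutating it. On Java 10 and later, Collectors.toUnmodifiableSet() expresses the same intent in one step; a minimal sketch (the class name and message strings are illustrative):

import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class UnmodifiableToSet {
    public static void main(String[] args) {
        // Java 10+: collect straight into an unmodifiable Set instead of
        // wrapping the result of Collectors.toSet() afterwards.
        Set<String> messages = Stream.of("disk full", "timeout")
                .collect(Collectors.toUnmodifiableSet());

        // messages.add("oops"); // would throw UnsupportedOperationException
        System.out.println(messages.contains("timeout")); // prints true
    }
}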
From source file:delfos.dataset.basic.loader.types.DatasetLoaderAbstract.java
@Override
public ContentDataset getContentDataset() throws CannotLoadContentDataset {
    RatingsDataset<RatingType> ratingsDataset = getRatingsDataset();
    return new ContentDatasetDefault(ratingsDataset.allRatedItems().stream()
            .map(idItem -> new Item(idItem))
            .collect(Collectors.toSet()));
}
From source file:org.n52.iceland.request.operator.RequestOperatorRepository.java
public Set<RequestOperatorKey> getActiveRequestOperatorKeys(ServiceOperatorKey sok) {
    return activeRequestOperatorStream(sok).map(Entry::getKey).collect(Collectors.toSet());
}
From source file:com.ikanow.aleph2.search_service.elasticsearch.utils.ElasticsearchHadoopUtils.java
/**
 * @param input_config - the input settings
 * @return
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public static IAnalyticsAccessContext<InputFormat> getInputFormat(final Client client,
        final AnalyticThreadJobBean.AnalyticThreadJobInputBean job_input) {
    return new IAnalyticsAccessContext<InputFormat>() {
        private LinkedHashMap<String, Object> _mutable_output = null;

        @Override
        public String describe() {
            //(return the entire thing)
            return ErrorUtils.get("service_name={0} options={1}",
                    this.getAccessService().right().value().getSimpleName(),
                    this.getAccessConfig().get());
        }

        /* (non-Javadoc)
         * @see com.ikanow.aleph2.data_model.interfaces.data_analytics.IAnalyticsAccessContext#getAccessService()
         */
        @Override
        public Either<InputFormat, Class<InputFormat>> getAccessService() {
            return Either.right((Class<InputFormat>) (Class<?>) Aleph2EsInputFormat.class);
        }

        /* (non-Javadoc)
         * @see com.ikanow.aleph2.data_model.interfaces.data_analytics.IAnalyticsAccessContext#getAccessConfig()
         */
        @Override
        public Optional<Map<String, Object>> getAccessConfig() {
            if (null != _mutable_output) {
                return Optional.of(_mutable_output);
            }
            _mutable_output = new LinkedHashMap<>();

            // Check for input record limit:
            Optional.ofNullable(job_input.config()).map(cfg -> cfg.test_record_limit_request())
                    .ifPresent(max -> _mutable_output.put(Aleph2EsInputFormat.BE_DEBUG_MAX_SIZE, Long.toString(max)));

            final String index_resource = ElasticsearchContext.READ_PREFIX
                    + ElasticsearchIndexUtils.getBaseIndexName(
                            BeanTemplateUtils.build(DataBucketBean.class)
                                    .with(DataBucketBean::full_name, job_input.resource_name_or_id()).done().get(),
                            Optional.empty())
                    + "*";
            //TODO (ALEPH-72): support multi-buckets / buckets with non-standard indexes ... also use the tmin/tmax
            // (needs MDB to pull out - because need to get the full bucket ugh)

            // Currently need to add types:
            //TODO (ALEPH-72): 2.2.0 you _can_ just put "indexes/" to get all types - that doesn't work for all es-hadoop code though
            final Multimap<String, String> index_type_mapping = ElasticsearchIndexUtils.getTypesForIndex(client, index_resource);
            final String type_resource = index_type_mapping.values().stream().collect(Collectors.toSet())
                    .stream().collect(Collectors.joining(","));
            final String final_index = getTimedIndexes(job_input, index_type_mapping, new Date())
                    .map(s -> Stream
                            .concat(s, TimeSliceDirUtils.getUntimedDirectories(index_type_mapping.keySet().stream()))
                            .collect(Collectors.joining(",")))
                    .orElse(index_resource);

            _mutable_output.put("es.resource", final_index + "/" + type_resource);
            _mutable_output.put("es.read.metadata", "true");
            _mutable_output.put("es.read.metadata.field", Aleph2EsInputFormat.ALEPH2_META_FIELD);
            _mutable_output.put("es.index.read.missing.as.empty", "yes");
            _mutable_output.put("es.query",
                    Optional.ofNullable(job_input.filter()).map(f -> f.get("technology_override")).map(o -> {
                        return (o instanceof String) ? o.toString()
                                : _mapper.convertValue(o, JsonNode.class).toString();
                    }).orElse("?q=*"));
            //TODO (ALEPH-72) (incorporate tmin/tmax and also add a JSON mapping for the Aleph2 crud utils)

            // Here are the parameters that can be set:
            // es.query ... can be stringified JSON or a q=string .... eg conf.set("es.query", "?q=me*");
            //config.set("es.resource", overallIndexNames.toString()); ... this was in the format X,Y,Z[/type],,etc which then got copied to
            // create a simple multi-input format ... looks like i didn't do anything if no type was set, unclear if that was an optimization
            // or if it doesn't work... (if it doesn't work then what you have to do is get the mappings for each index and
            // get the types and insert them all)
            //config.set("es.index.read.missing.as.empty", "yes");
            // (not sure if need to set just normal http port/host?)
            //config.set("es.net.proxy.http.host", "localhost");
            //config.set("es.net.proxy.http.port", "8888");

            return Optional.of(Collections.unmodifiableMap(_mutable_output));
        }
    };
}
From source file:com.bitvantage.bitvantagetypes.collections.TreeBidirectionalMap.java
@Override
public Set<Entry<K, V>> entrySet() {
    return map.entrySet().stream()
            .map((Entry<ComparableWrapper<K>, ComparableWrapper<V>> e) -> new SimpleImmutableEntry<K, V>(
                    e.getKey().getWrapped(), e.getValue().getWrapped()))
            .collect(Collectors.toSet());
}
From source file:io.pivotal.xd.chaoslemur.infrastructure.StandardDirectorUtils.java
@SuppressWarnings("unchecked")
@Override
public Set<String> getDeployments() {
    URI deploymentsUri = UriComponentsBuilder.fromUri(this.root).path("deployments").build().toUri();
    List<Map<String, String>> deployments = this.restTemplate.getForObject(deploymentsUri, List.class);
    return deployments.stream().map(deployment -> deployment.get("name")).collect(Collectors.toSet());
}
From source file:de.ks.flatadocdb.metamodel.Parser.java
private Map<Field, PropertyPersister> resolvePropertyPersisters(Set<Field> allFields) {
    HashMap<Field, PropertyPersister> retval = new HashMap<>();
    Set<Field> fields = allFields.stream().filter(f -> f.isAnnotationPresent(Property.class))
            .collect(Collectors.toSet());
    for (Field field : fields) {
        Property annotation = field.getAnnotation(Property.class);
        Class<? extends PropertyPersister> persisterClass = annotation.value();
        PropertyPersister instance = getInstance(persisterClass);
        retval.put(field, instance);
    }
    return retval;
}
From source file:io.gravitee.repository.redis.management.RedisMembershipRepository.java
@Override
public Set<Membership> findByUserAndReferenceTypeAndMembershipType(String userId,
        MembershipReferenceType referenceType, String membershipType) throws TechnicalException {
    Set<RedisMembership> memberships = membershipRedisRepository.findByUserAndReferenceType(userId,
            referenceType.name());
    if (membershipType == null) {
        return memberships.stream().map(this::convert).collect(Collectors.toSet());
    } else {
        return memberships.stream().filter(membership -> membershipType.equals(membership.getType()))
                .map(this::convert).collect(Collectors.toSet());
    }
}
From source file:com.pinterest.rocksplicator.controller.tasks.AddHostTask.java
@Override
public void process(Context ctx) throws Exception {
    final String clusterName = ctx.getCluster();
    final String hdfsDir = getParameter().getHdfsDir();
    final HostBean hostToAdd = getParameter().getHostToAdd();
    final int rateLimitMbs = getParameter().getRateLimitMbs();
    final Admin.Client client = clientFactory.getClient(hostToAdd);

    // 1) ping the host to add to make sure it's up and running.
    try {
        client.ping();
        // continue if #ping() succeeds.
    } catch (TException tex) {
        ctx.getTaskQueue().failTask(ctx.getId(), "Host to add is not alive!");
        return;
    }

    ClusterBean clusterBean = ZKUtil.getClusterConfig(zkClient, clusterName);
    if (clusterBean == null) {
        ctx.getTaskQueue().failTask(ctx.getId(), "Failed to read cluster config from zookeeper.");
        return;
    }

    for (SegmentBean segment : clusterBean.getSegments()) {
        // 2) find shards to serve for new host
        Set<Integer> shardToServe = IntStream.range(0, segment.getNumShards()).boxed()
                .collect(Collectors.toSet());
        for (HostBean host : segment.getHosts()) {
            // ignore hosts in different AZ than the new host
            if (host.getAvailabilityZone().equals(hostToAdd.getAvailabilityZone())) {
                host.getShards().forEach(shard -> shardToServe.remove(shard.getId()));
            }
        }

        // 3) upload shard data to the new host
        try {
            for (int shardId : shardToServe) {
                HostBean upstream = findMasterShard(shardId, segment.getHosts());
                if (upstream == null) {
                    //TODO: should we fail the task in this case?
                    LOG.error("Failed to find master shard for segment={}, shardId={}", segment.getName(), shardId);
                    continue;
                }
                Admin.Client upstreamClient = clientFactory.getClient(upstream);
                String dbName = ShardUtil.getDBNameFromSegmentAndShardId(segment.getName(), shardId);
                String hdfsPath = ShardUtil.getHdfsPath(hdfsDir, clusterName, segment.getName(), shardId,
                        upstream.getIp(), getCurrentDateTime());

                upstreamClient.backupDB(new BackupDBRequest(dbName, hdfsPath).setLimit_mbs(rateLimitMbs));
                LOG.info("Backed up {} from {} to {}.", dbName, upstream.getIp(), hdfsPath);

                client.restoreDB(new RestoreDBRequest(dbName, hdfsPath, upstream.getIp(), (short) upstream.getPort())
                        .setLimit_mbs(rateLimitMbs));
                LOG.info("Restored {} from {} to {}.", dbName, hdfsPath, hostToAdd.getIp());
            }
        } catch (TException ex) {
            String errMsg = String.format("Failed to upload shard data to host %s.", hostToAdd.getIp());
            LOG.error(errMsg, ex);
            ctx.getTaskQueue().failTask(ctx.getId(), errMsg);
            return;
        }

        // add shard config to new host
        hostToAdd.setShards(shardToServe.stream().map(id -> new ShardBean().setId(id).setRole(Role.SLAVE))
                .collect(Collectors.toList()));
        List<HostBean> newHostList = segment.getHosts();
        newHostList.add(hostToAdd);
        segment.setHosts(newHostList);
    }

    // 4) update cluster config in zookeeper
    ZKUtil.updateClusterConfig(zkClient, clusterBean);
    LOG.info("Updated config to {}", ConfigParser.serializeClusterConfig(clusterBean));
    ctx.getTaskQueue().finishTask(ctx.getId(),
            "Successfully added host " + hostToAdd.getIp() + ":" + hostToAdd.getPort());
}
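Note that step 2 above calls remove() on the set produced by Collectors.toSet(). That works with the HashSet the JDK currently returns, but the collector's contract makes no mutability guarantee; when a mutable set is required, Collectors.toCollection pins down the implementation. A minimal sketch (the shard count and removed id are illustrative):

import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class MutableShardSet {
    public static void main(String[] args) {
        // Name the implementation explicitly to guarantee a mutable HashSet,
        // rather than relying on what Collectors.toSet() happens to return.
        Set<Integer> shardToServe = IntStream.range(0, 8).boxed()
                .collect(Collectors.toCollection(HashSet::new));

        shardToServe.remove(3); // safe: this set is known to be mutable
        System.out.println(shardToServe.size()); // prints 7
    }
}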
From source file:com.vsct.dt.hesperides.indexation.search.TemplateSearch.java
public Set<TemplateSearchResponse> getTemplatesByExactNamespace(final String namespace) {
    String url = "/templates/_search";
    String body = TemplateContentGenerator.from(mustacheSearchByExactNamespace).put("namespace", namespace)
            .generate();
    ElasticSearchResponse<TemplateSearchResponse> esResponse = elasticSearchClient
            .withResponseReader(elasticSearchTemplateReader).post(url, body);
    // Filter to retain only exact namespace
    return esResponse.streamOfData().filter(template -> template.getNamespace().equals(namespace))
            .collect(Collectors.toSet());
}