List of usage examples for java.util.stream.Collectors.joining
public static Collector<CharSequence, ?, String> joining(CharSequence delimiter)
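Collectors.joining(CharSequence delimiter) returns a Collector that concatenates the CharSequence elements of a stream into a single String, separated by the given delimiter. A minimal, self-contained sketch (the class name and sample values are illustrative only, not taken from the projects below):

import java.util.List;
import java.util.stream.Collectors;

public class JoiningExample {
    public static void main(String[] args) {
        List<String> names = List.of("alpha", "beta", "gamma"); // hypothetical sample data

        // Join with a delimiter only
        String csv = names.stream().collect(Collectors.joining(", "));
        System.out.println(csv); // alpha, beta, gamma

        // The three-argument overload also adds a prefix and suffix around the result
        String bracketed = names.stream().collect(Collectors.joining(", ", "[", "]"));
        System.out.println(bracketed); // [alpha, beta, gamma]
    }
}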
From source file:alluxio.cli.fsadmin.report.CapacityCommand.java
/**
 * Gets the formatted tier values of a worker.
 *
 * @param map the map to get worker tier values from
 * @param workerName name of the worker
 * @return the formatted tier values of the input worker name
 */
private static String getWorkerFormattedTierValues(Map<String, Map<String, String>> map, String workerName) {
    return map.values().stream()
            .map((tierMap) -> (String.format("%-14s", tierMap.getOrDefault(workerName, "-"))))
            .collect(Collectors.joining(""));
}
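A note on the empty delimiter used above: Collectors.joining("") simply concatenates the padded column values back to back, and behaves the same as the no-argument overload Collectors.joining(). For example, Stream.of("a", "b", "c").collect(Collectors.joining("")) yields "abc".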
From source file:com.thinkbiganalytics.jobrepo.rest.controller.FeedsRestController.java
@GET
@Path("/query/{feedId}")
@Produces(MediaType.TEXT_PLAIN)
@ApiOperation("Gets the name of the feed matching the feedId.")
@ApiResponses(@ApiResponse(code = 200, message = "Returns the feed name.", response = String.class))
public String getFeedName(@PathParam("feedId") String feedId) {
    return metadataAccess.read(() -> {
        String filter = "id.uuid==" + feedId;
        List<OpsManagerFeed> feeds = ((OpsFeedManagerFeedProvider) opsFeedManagerFeedProvider)
                .findFeedsWithFilter(filter);
        if (feeds != null) {
            return feeds.stream().map(f -> f.getName()).collect(Collectors.joining(","));
        }
        return "NOT FOUND";
    });
}
From source file:com.ikanow.aleph2.analytics.storm.utils.TestStormControllerUtil.java
@Test
public void test_stopAllJobs() throws Exception {
    // Some beans
    final DataBucketBean bucket = BeanTemplateUtils.clone(createBucket())
            .with(DataBucketBean::full_name, "/test/stop/jobs").done();
    final SharedLibraryBean library_tech = BeanTemplateUtils.build(SharedLibraryBean.class)
            .with(SharedLibraryBean::_id, "_test_lib").with(SharedLibraryBean::path_name, "/test/lib").done()
            .get();
    final SharedLibraryBean library_mod = BeanTemplateUtils.build(SharedLibraryBean.class)
            .with(SharedLibraryBean::_id, "_test_module").with(SharedLibraryBean::path_name, "/test/module")
            .done().get();

    // Context
    final MockAnalyticsContext test_analytics_context = _app_injector.getInstance(MockAnalyticsContext.class);
    final StreamingEnrichmentContextService context = new StreamingEnrichmentContextService(
            test_analytics_context);
    test_analytics_context.setBucket(bucket);
    test_analytics_context.setTechnologyConfig(library_tech);
    test_analytics_context.resetLibraryConfigs(ImmutableMap.<String, SharedLibraryBean>builder()
            .put(library_mod.path_name(), library_mod).put(library_mod._id(), library_mod).build());

    // Job1
    {
        final IEnrichmentStreamingTopology enrichment_topology = new SampleStormStreamTopology1();
        final StormTopology storm_top = (StormTopology) enrichment_topology
                .getTopologyAndConfiguration(bucket, context)._1();
        final String cached_jar_dir = System.getProperty("java.io.tmpdir");
        StormControllerUtil.startJob(storm_cluster, bucket, Optional.of("job1_name"), Collections.emptyList(),
                Collections.emptyList(), storm_top, Collections.emptyMap(), cached_jar_dir);
    }
    // Job2
    {
        final IEnrichmentStreamingTopology enrichment_topology = new SampleStormStreamTopology1();
        final StormTopology storm_top = (StormTopology) enrichment_topology
                .getTopologyAndConfiguration(bucket, context)._1();
        final String cached_jar_dir = System.getProperty("java.io.tmpdir");
        StormControllerUtil.startJob(storm_cluster, bucket, Optional.of("job2_name"), Collections.emptyList(),
                Collections.emptyList(), storm_top, Collections.emptyMap(), cached_jar_dir);
    }
    // Unrelated job
    {
        final IEnrichmentStreamingTopology enrichment_topology = new SampleStormStreamTopology1();
        final StormTopology storm_top = (StormTopology) enrichment_topology
                .getTopologyAndConfiguration(bucket, context)._1();
        final String cached_jar_dir = System.getProperty("java.io.tmpdir");
        StormControllerUtil.startJob(storm_cluster,
                BeanTemplateUtils.clone(bucket).with(DataBucketBean::full_name, "/test/stop/jobs/1").done(),
                Optional.of("job2_name"), Collections.emptyList(), Collections.emptyList(), storm_top,
                Collections.emptyMap(), cached_jar_dir);
    }

    // OK test: check we list all the names
    final LocalStormController storm_controller = (LocalStormController) storm_cluster;
    {
        List<String> jobs = storm_controller.getJobNamesForBucket(bucket.full_name());
        assertEquals("test_stop_jobs_job1_name__dd6725792433 ; test_stop_jobs_job2_name__dd6725792433",
                jobs.stream().sorted().collect(Collectors.joining(" ; ")));
    }
    for (int ii = 0; ii < 60; ++ii) {
        final TopologyInfo info1 = StormControllerUtil.getJobStats(storm_cluster,
                StormControllerUtil.bucketPathToTopologyName(bucket, Optional.of("job1_name")));
        final TopologyInfo info2 = StormControllerUtil.getJobStats(storm_cluster,
                StormControllerUtil.bucketPathToTopologyName(bucket, Optional.of("job1_name")));
        if (info1.get_status().equals("ACTIVE") && info2.get_status().equals("ACTIVE")) {
            break;
        }
    }
    Thread.sleep(5000L); // (wait a bit for the jobs to be fully started)

    StormControllerUtil.stopAllJobsForBucket(storm_cluster, bucket);

    // wait for jobs to die:
    for (int ii = 0; ii < 60; ++ii) {
        Thread.sleep(1000L);
        List<String> jobs = storm_controller.getJobNamesForBucket(bucket.full_name());
        if (jobs.isEmpty())
            break;
    }
    {
        List<String> jobs = storm_controller.getJobNamesForBucket(bucket.full_name());
        assertTrue("All jobs for this bucket removed: " + jobs.stream().collect(Collectors.joining(" ; ")),
                jobs.isEmpty());
    }
    // Check the other job is still alive:
    {
        List<String> other_jobs = storm_controller.getJobNamesForBucket("/test/stop/jobs/1");
        assertEquals("Just the one job: " + other_jobs.stream().collect(Collectors.joining(" ; ")), 1,
                other_jobs.size());
    }
}
From source file:com.loyalty.controllers.DashboardController.java
private void renewPromotions() {
    String promotions = null;
    try {
        promotions = getService().getPromotions().stream().map(promo -> promo.getDescription())
                .collect(Collectors.joining("; "));
    } catch (IOException e) {
        e.printStackTrace();
    }
    String finalPromotions = promotions;
    Platform.runLater(() -> lbl_promotion.setText(finalPromotions));
}
From source file:org.lambdamatic.internal.elasticsearch.codec.DocumentCodec.java
/**
 * Analyzes the given domain type and returns its field to use as the document id, if
 * available.
 *
 * @param domainType the domain type to analyze
 * @return the <strong>single</strong> Java field annotated with {@link DocumentIdField}. If no field
 *         matches the criteria, or more than one field matches, a {@link MappingException} is thrown.
 */
public static Field getIdField(final Class<?> domainType) {
    final List<Pair<Field, DocumentIdField>> candidateFields = Stream.of(domainType.getDeclaredFields())
            .map(field -> new Pair<>(field, field.getAnnotation(DocumentIdField.class)))
            .filter(pair -> pair.getRight() != null).collect(Collectors.toList());
    if (candidateFields.isEmpty()) {
        throw new MappingException("No field is annotated with @{} in type {}",
                DocumentIdField.class.getName(), domainType);
    } else if (candidateFields.size() > 1) {
        final String fieldNames = candidateFields.stream().map(pair -> pair.getLeft().getName())
                .collect(Collectors.joining(", "));
        throw new MappingException("More than one field is annotated with @{} in type {}: {}",
                DocumentIdField.class.getName(), domainType, fieldNames);
    }
    return candidateFields.get(0).getLeft();
}
From source file:ddf.catalog.ftp.ftplets.FtpRequestHandler.java
private FtpletResult store(FtpSession session, FtpRequest request, boolean isStoreUnique)
        throws FtpException, IOException {
    LOGGER.debug("Beginning FTP ingest of {}", request.getArgument());

    Subject shiroSubject = (Subject) session.getAttribute(SUBJECT);
    if (shiroSubject == null) {
        return FtpletResult.DISCONNECT;
    }

    FtpFile ftpFile = null;
    String fileName = request.getArgument();
    try {
        ftpFile = session.getFileSystemView().getFile(fileName);
    } catch (FtpException e) {
        LOGGER.debug("Failed to retrieve file from FTP session");
    }

    String requestTypeString = isStoreUnique ? STOU_REQUEST : STOR_REQUEST;

    if (ftpFile == null) {
        LOGGER.debug("Sending FTP status code 501 to client - syntax errors in request parameters");
        session.write(new DefaultFtpReply(FtpReply.REPLY_501_SYNTAX_ERROR_IN_PARAMETERS_OR_ARGUMENTS,
                requestTypeString));
        throw new FtpException("File to be transferred from client did not exist");
    }

    DataConnectionFactory connFactory = session.getDataConnection();
    if (connFactory instanceof IODataConnectionFactory) {
        InetAddress address = ((IODataConnectionFactory) connFactory).getInetAddress();
        if (address == null) {
            session.write(new DefaultFtpReply(FtpReply.REPLY_503_BAD_SEQUENCE_OF_COMMANDS,
                    "PORT or PASV must be issued first"));
            LOGGER.debug("Sending FTP status code 503 to client - PORT or PASV must be issued before STOR");
            throw new FtpException("FTP client address was null");
        }
    }

    if (!ftpFile.isWritable()) {
        session.write(new DefaultFtpReply(FtpReply.REPLY_550_REQUESTED_ACTION_NOT_TAKEN,
                "Insufficient permissions"));
        LOGGER.debug("Sending FTP status code 550 to client - insufficient permissions to write file.");
        throw new FtpException("Insufficient permissions to write file");
    }

    session.write(new DefaultFtpReply(FtpReply.REPLY_150_FILE_STATUS_OKAY, requestTypeString + " " + fileName));
    LOGGER.debug("Replying to client with code 150 - file status okay");

    if (isDotFile(request.getArgument())) {
        DataConnection dataConnection;
        try {
            dataConnection = connFactory.openConnection();
        } catch (Exception e) {
            throw new IOException("Error getting the output stream from FTP session", e);
        }
        dataConnection.transferFromClient(session, addTempFileToSession(session, ftpFile.getAbsolutePath(),
                new TemporaryFileBackedOutputStream()));
        if (isStoreUnique) {
            session.write(new DefaultFtpReply(FtpReply.REPLY_125_DATA_CONNECTION_ALREADY_OPEN,
                    "Storing data with unique name: " + fileName));
        }
        session.write(new DefaultFtpReply(FtpReply.REPLY_226_CLOSING_DATA_CONNECTION, "Closing data connection"));
        LOGGER.debug("Sending FTP status code 226 to client - closing data connection");
    } else {
        try (TemporaryFileBackedOutputStream outputStream = new TemporaryFileBackedOutputStream()) {
            DataConnection dataConnection = connFactory.openConnection();
            dataConnection.transferFromClient(session, outputStream);

            CreateStorageRequest createRequest = getCreateStorageRequest(fileName, outputStream);
            List<Metacard> storedMetacards = storeObject(shiroSubject, fileName, createRequest);

            if (isStoreUnique && !storedMetacards.isEmpty()) {
                String ids = storedMetacards.stream().map(Metacard::getId).collect(Collectors.joining(","));
                session.write(new DefaultFtpReply(FtpReply.REPLY_125_DATA_CONNECTION_ALREADY_OPEN,
                        "Storing data with unique name: " + ids));
            }
            session.write(new DefaultFtpReply(FtpReply.REPLY_226_CLOSING_DATA_CONNECTION,
                    "Closing data connection"));
            LOGGER.debug("Sending FTP status code 226 to client - closing data connection");
        } catch (FtpException fe) {
            throw new FtpException("Failure to create metacard for file " + fileName, fe);
        } catch (Exception e) {
            throw new IOException("Error getting the output stream from FTP session", e);
        } finally {
            session.getDataConnection().closeDataConnection();
        }
    }

    return FtpletResult.SKIP;
}
From source file:org.ng200.openolympus.services.TestingService.java
private void checkVerdict(final Verdict verdict, final SolutionJudge judge, final List<Path> testFiles,
        final BigDecimal maximumScore, final Properties properties) throws ExecutionException {
    if (this.dataProvider == null) {
        throw new IllegalStateException("Shared data provider is null!");
    }
    final Lock lock = verdict.getSolution().getTask().readLock();
    lock.lock();
    try {
        TestingService.logger.info("Scheduling verdict {} for testing.", verdict.getId());

        final JPPFJob job = new JPPFJob();
        job.setDataProvider(this.dataProvider);
        job.setName("Check verdict " + verdict.getId());
        final int priority = (int) ((verdict.isViewableWhenContestRunning() ? (Integer.MAX_VALUE / 2) : 0)
                - verdict.getId());
        job.getSLA().setMaxNodes(1);
        job.getSLA().setPriority(priority);
        job.getSLA().setDispatchExpirationSchedule(new JPPFSchedule(60000L));
        job.getSLA().setMaxDispatchExpirations(3);

        TaskContainer taskContainer = taskContainerCache
                .getTaskContainerForTask(verdict.getSolution().getTask());
        Thread.currentThread().setContextClassLoader(
                new URLClassLoader(taskContainer.getClassLoaderURLs().toArray(new URL[0]),
                        Thread.currentThread().getContextClassLoader()));

        job.add(new JacksonSerializationDelegatingTask<>(
                new VerdictCheckingTask(judge, testFiles, maximumScore, properties),
                taskContainer.getClassLoaderURLs()));
        job.setBlocking(true);
        jppfClient.registerClassLoader(taskContainer.getClassLoader(), job.getUuid());
        this.jppfClient.submitJob(job);

        @SuppressWarnings("unchecked")
        final org.jppf.node.protocol.Task<String> task = (org.jppf.node.protocol.Task<String>) job
                .awaitResults().get(0);
        if (task.getThrowable() != null) {
            throw task.getThrowable();
        }

        ObjectMapper objectMapper = JacksonSerializationFactory.createObjectMapper();
        final JsonTaskExecutionResult<Pair<SolutionJudge, SolutionResult>> checkingResult =
                ((JacksonSerializationDelegatingTask<Pair<SolutionJudge, SolutionResult>, VerdictCheckingTask>) job
                        .awaitResults().get(0)).getResultOrThrowable();
        if (checkingResult.getError() != null) {
            throw checkingResult.getError();
        }

        final SolutionResult result = checkingResult.getResult().getSecond();
        verdict.setScore(result.getScore());
        verdict.setMemoryPeak(result.getMemoryPeak());
        verdict.setCpuTime(Duration.ofMillis(result.getCpuTime()));
        verdict.setRealTime(Duration.ofMillis(result.getRealTime()));
        verdict.setStatus(result.getResult());

        switch (result.getResult()) {
        case OK:
        case TIME_LIMIT:
        case MEMORY_LIMIT:
        case OUTPUT_LIMIT:
        case PRESENTATION_ERROR:
        case WRONG_ANSWER:
        case RUNTIME_ERROR:
            break;
        case INTERNAL_ERROR:
            result.getErrorMessages()
                    .forEach((stage, message) -> this.internalErrors.put(this.internalErrorCounter++,
                            new Pair<String, String>(verdict.getSolution().getTask().getName(), message)));
            break;
        case SECURITY_VIOLATION:
            verdict.setUnauthorisedSyscall(result.getUnauthorisedSyscall());
            break;
        case COMPILE_ERROR:
            final String message = result.getErrorMessages().values().stream()
                    .collect(Collectors.joining("\n"));
            verdict.setAdditionalInformation(
                    HtmlUtils.htmlEscape(message.substring(0, Math.min(128, message.length()))));
            break;
        case WAITING:
            throw new IllegalStateException("Judge returned result \"waiting\".");
        }
    } catch (final Throwable throwable) {
        verdict.setStatus(SolutionResult.Result.INTERNAL_ERROR);
        throw new RuntimeException("Couldn't run solution: ", throwable);
    } finally {
        lock.unlock();
        verdict.setTested(true);
        if (verdict.getStatus() == SolutionResult.Result.WAITING) {
            verdict.setStatus(SolutionResult.Result.INTERNAL_ERROR);
            TestingService.logger.error(
                    "Judge for task {} did not set the result status to an acceptable value: got WAITING instead.",
                    verdict.getSolution().getTask().getId());
        }
        this.solutionService.saveVerdict(verdict);
    }
}
From source file:ddf.catalog.source.solr.SolrFilterDelegate.java
private String anyTextSolrQuery(String searchPhrase, boolean isCaseSensitive) {
    String solrQuery = resolver.anyTextFields().map(resolver::getWhitespaceTokenizedField)
            .map((whitespaceField) -> {
                if (isCaseSensitive) {
                    return resolver.getCaseSensitiveField(whitespaceField);
                } else {
                    return whitespaceField;
                }
            }).map((field) -> field + ":" + searchPhrase).collect(Collectors.joining(" "));
    return "(" + solrQuery + ")";
}
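A possible alternative here (not what this project does): the surrounding parentheses could also be produced directly by the three-argument overload, e.g. .collect(Collectors.joining(" ", "(", ")")), which accepts a prefix and suffix in addition to the delimiter.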
From source file:com.ikanow.aleph2.search_service.elasticsearch.utils.ElasticsearchHiveUtils.java
/** Handles the prefix and suffix of the full hive schema
 *  https://www.elastic.co/guide/en/elasticsearch/hadoop/current/hive.html
 * @param table_name - if empty then "main_table"
 * @param bucket
 * @param schema
 * @param partial_hive_schema
 * @return
 */
public static Validation<String, String> generateFullHiveSchema(final Optional<String> table_name,
        final DataBucketBean bucket, final DataSchemaBean.DataWarehouseSchemaBean schema,
        Optional<Client> maybe_client, ElasticsearchIndexServiceConfigBean config) {
    // (ignore views for the moment)

    final String prefix = ErrorUtils.get("CREATE EXTERNAL TABLE {0} ", getTableName(bucket, schema));

    final DataSchemaBean.DataWarehouseSchemaBean.Table table = table_name.flatMap(t -> Optionals
            .ofNullable(schema.views()).stream().filter(v -> t.equals(v.database_name())).findFirst())
            .orElse(schema.main_table());

    final JsonNode user_schema = _mapper.convertValue(table.table_format(), JsonNode.class);

    final Validation<String, String> partial_table = generatePartialHiveSchema(prefix, user_schema, true);

    // (for the main table, just going to be the full alias - for views will need to be cleverer)
    final String index = Optionals
            .of(() -> bucket.data_schema().search_index_schema().technology_override_schema()
                    .get(SearchIndexSchemaDefaultBean.index_name_override_).toString())
            .orElseGet(() -> "r__" + BucketUtils.getUniqueSignature(bucket.full_name(), Optional.empty()));

    final Optional<ElasticsearchHiveOverrideBean> maybe_override = Optionals
            .of(() -> schema.technology_override_schema())
            .map(m -> BeanTemplateUtils.from(m, ElasticsearchHiveOverrideBean.class).get());

    // OK all this horrible code is intended to sort out the list of types to apply in the hive query
    final Optional<ElasticsearchHiveOverrideBean.TableOverride> table_override = maybe_override
            .map(cfg -> cfg.table_overrides().get(table_name.orElse(MAIN_TABLE_NAME)));
    final Optional<Set<String>> user_type_overrides = table_override.map(t -> t.types())
            .filter(l -> !l.isEmpty()).map(l -> new TreeSet<String>(l));

    final Set<String> mutable_type_set = user_type_overrides.orElseGet(() -> {
        return new TreeSet<String>(
                maybe_client.map(client -> ElasticsearchIndexUtils.getTypesForIndex(client, index).values())
                        .orElse(Collections.emptySet()));
    });

    final ElasticsearchIndexServiceConfigBean schema_config = ElasticsearchIndexConfigUtils
            .buildConfigBeanFromSchema(bucket, config, _mapper);
    final CollidePolicy collide_policy = Optionals
            .of(() -> schema_config.search_technology_override().collide_policy())
            .orElse(CollidePolicy.new_type);

    Optionals.of(() -> schema_config.search_technology_override().type_name_or_prefix()).map(Optional::of)
            .orElseGet(() -> Optional.of((collide_policy == CollidePolicy.new_type)
                    ? ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext.DEFAULT_PREFIX
                    : ElasticsearchIndexServiceConfigBean.DEFAULT_FIXED_TYPE_NAME))
            .ifPresent(type_or_prefix -> {
                if (!user_type_overrides.isPresent()) { // leave alone if manually specified
                    if (collide_policy == CollidePolicy.new_type) { // add a few types
                        //TODO (ALEPH-17): need to make this get auto populated as new types are added, see the ALEPH-17 comment in ElasticsearchIndexService
                        if (mutable_type_set.size() < 10) {
                            IntStream.rangeClosed(1, 10).boxed().map(i -> type_or_prefix + i.toString())
                                    .forEach(type -> mutable_type_set.add(type));
                        }
                    } else { // OK in this case just make sure the default type is represented
                        mutable_type_set.add(type_or_prefix);
                    }
                }
            });

    final String suffix = Optional.of(" STORED BY 'org.elasticsearch.hadoop.hive.EsStorageHandler' ")
            .map(s -> s + ErrorUtils.get(
                    "TBLPROPERTIES(''es.index.auto.create'' = ''false'', ''es.resource'' = ''{0}/{1}''",
                    index, mutable_type_set.stream().collect(Collectors.joining(","))))
            .map(s -> table_override.map(t -> t.name_mappings()).filter(m -> !m.isEmpty())
                    .map(m -> s + ", 'es.mapping.names' = '"
                            + m.entrySet().stream().map(kv -> kv.getKey() + ":" + kv.getValue())
                                    .collect(Collectors.joining(","))
                            + "'")
                    .orElse(s))
            .map(s -> table_override
                    .flatMap(t -> Optional.ofNullable(t.url_query()).map(ss -> "?" + ss).map(Optional::of)
                            .orElseGet(() -> Optional.ofNullable(t.json_query())
                                    .map(jq -> _mapper.convertValue(jq, JsonNode.class).toString())))
                    .map(ss -> s + ", 'es.query' = '" + ss + "'").orElse(s))
            .map(s -> s + ") ").get();

    return partial_table.map(s -> s + suffix);
}
From source file:com.spotify.styx.api.BackfillResource.java
public Response<Backfill> postBackfill(BackfillInput input) {
    final BackfillBuilder builder = Backfill.newBuilder();

    final String id = RandomGenerator.DEFAULT.generateUniqueId("backfill");
    final Schedule schedule;

    final WorkflowId workflowId = WorkflowId.create(input.component(), input.workflow());
    final Set<WorkflowInstance> activeWorkflowInstances;
    try {
        activeWorkflowInstances = storage.readActiveWorkflowInstances(input.component()).keySet();
        final Optional<Workflow> workflowOpt = storage.workflow(workflowId);
        if (!workflowOpt.isPresent()) {
            return Response.forStatus(Status.NOT_FOUND.withReasonPhrase("workflow not found"));
        }
        schedule = workflowOpt.get().configuration().schedule();
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }

    if (!TimeUtil.isAligned(input.start(), schedule)) {
        return Response
                .forStatus(Status.BAD_REQUEST.withReasonPhrase("start parameter not aligned with schedule"));
    }

    if (!TimeUtil.isAligned(input.end(), schedule)) {
        return Response
                .forStatus(Status.BAD_REQUEST.withReasonPhrase("end parameter not aligned with schedule"));
    }

    final List<WorkflowInstance> alreadyActive = rangeOfInstants(input.start(), input.end(), schedule).stream()
            .map(instant -> WorkflowInstance.create(workflowId, toParameter(schedule, instant)))
            .filter(activeWorkflowInstances::contains).collect(toList());
    if (!alreadyActive.isEmpty()) {
        final String alreadyActiveMessage = alreadyActive.stream().map(WorkflowInstance::parameter)
                .collect(Collectors.joining(", "));
        return Response.forStatus(Status.CONFLICT
                .withReasonPhrase("these partitions are already active: " + alreadyActiveMessage));
    }

    builder.id(id).allTriggered(false).workflowId(workflowId).concurrency(input.concurrency())
            .start(input.start()).end(input.end()).schedule(schedule).nextTrigger(input.start()).halted(false);
    final Backfill backfill = builder.build();

    try {
        storage.storeBackfill(backfill);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }

    return Response.forPayload(backfill);
}