Example usage for java.util.stream Collectors joining

List of usage examples for java.util.stream Collectors joining

Introduction

On this page you can find example usage for java.util.stream Collectors.joining.

Prototype

public static Collector<CharSequence, ?, String> joining(CharSequence delimiter) 

Document

Returns a Collector that concatenates the input elements, separated by the specified delimiter, in encounter order.
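
A minimal, self-contained sketch of the collector in action (the class name and sample data are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class JoiningDemo {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("alpha", "beta", "gamma");

        // joining(delimiter) concatenates the elements in encounter order,
        // separated by the delimiter
        String joined = words.stream().collect(Collectors.joining(", "));
        System.out.println(joined); // alpha, beta, gamma

        // the overload joining(delimiter, prefix, suffix) additionally wraps the result
        String wrapped = words.stream().collect(Collectors.joining(", ", "[", "]"));
        System.out.println(wrapped); // [alpha, beta, gamma]
    }
}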

Usage

From source file:com.acmutv.ontoqa.core.parser.AdvancedSltagParser.java

/**
 * Parses {@code sentence} with {@code grammar}.
 * @param sentence the sentence to parse.
 * @param grammar the grammar to parse with.
 * @param ontology the ontology used to resolve ambiguities.
 * @return the parsed SLTAG.
 * @throws OntoqaParsingException when parsing fails.
 */
@Override
public Sltag parse(String sentence, Grammar grammar, Ontology ontology) throws Exception {
    ParserStateNew state = new ParserStateNew(sentence);

    SltagTokenizer tokenizer = new SimpleSltagTokenizer(grammar, sentence);

    /* PRE-PROCESSING */
    LOGGER.debug("[STATUS] :: PRE-PROCESSING");
    if (isAskSentence(sentence)) {
        LOGGER.debug("[PRE-PROCESSING] :: found ASK structure");
        state.setAsk(true);
    } else {
        LOGGER.debug("[PRE-PROCESSING] :: found SELECT structure");
        state.setAsk(false);
    }

    /* TOKENIZATION */
    LOGGER.debug("[STATUS] :: PROCESSING");
    while (tokenizer.hasNext()) {
        Token token = tokenizer.next();

        String lexPattern = token.getLexicalPattern();
        List<ElementarySltag> candidates = token.getCandidates();
        state.setIdxPrev(token.getPrev());

        LOGGER.debug("[PROCESSING] :: entry '{}'", lexPattern);

        if (candidates.isEmpty()) {
            throw new OntoqaParsingException("Cannot find SLTAG for entry: %s", lexPattern);
        }

        /* AMBIGUITIES MANAGEMENT */
        LOGGER.debug("[STATUS] :: AMBIGUITIES MANAGEMENT");
        if (candidates.size() > 1) {
            LOGGER.debug("[AMBIGUITIES MANAGEMENT] :: found {} ambiguities for entry '{}' (idxPrev: {})\n{}",
                    candidates.size(), lexPattern, state.getIdxPrev(),
                    candidates.stream().map(ElementarySltag::toPrettyString).collect(Collectors.joining("\n")));
            filterAmbiguities(candidates, state, ontology);
        } else {
            LOGGER.debug("[AMBIGUITIES MANAGEMENT] :: no ambiguities found");
        }

        /* QUEUE INSERTION */
        LOGGER.debug("[STATUS] :: QUEUE INSERTION");
        if (candidates.size() == 1) {
            Sltag candidate = candidates.get(0);
            if (candidate.isAdjunctable()) {
                LOGGER.debug("[QUEUE] :: enqueueing adjunction (entry: '{}' | idxPrev: {}):\n{}", lexPattern,
                        state.getIdxPrev(), candidate.toPrettyString());
                state.addWaitingAdjunction(candidate, state.getIdxPrev());
            } else if (candidate.isSentence()) {
                LOGGER.debug("[QUEUE] :: setting sentence (entry: '{}' | idxPrev: {}):\n{}", lexPattern,
                        state.getIdxPrev(), candidate.toPrettyString());
                if (state.getCurr() != null) {
                    throw new Exception("Cannot decide sentence root: multiple roots found");
                }
                state.setCurr(candidate);
                state.getCurr().getSemantics().setSelect(!state.isAsk());
            } else {
                LOGGER.debug("[QUEUE] :: enqueueing substitution (entry: '{}' | idxPrev: {}) :\n{}", lexPattern,
                        state.getIdxPrev(), candidate.toPrettyString());
                state.addWaitingSubstitution(candidate, state.getIdxPrev());
            }
        }

        /* QUEUE CONSUMPTION */
        LOGGER.debug("[STATUS] :: QUEUE CONSUMPTION");
        if (state.getCurr() != null) {
            consumeWaitingSubstitutions(state);
            consumeWaitingAdjunctions(state);
        }

        LOGGER.debug("[STATUS] :: current SLTAG\n{}",
                (state.getCurr() != null) ? state.getCurr().toPrettyString() : "NONE");
    }

    if (state.getCurr() == null) {
        throw new Exception("Cannot build SLTAG");
    }

    /* AMBIGUITIES RESOLUTION */
    LOGGER.debug("[STATUS] :: AMBIGUITIES RESOLUTION");
    if (!state.getConflictList().isEmpty()) {
        solveAmbiguities(state, ontology);
    }

    /* POST-PROCESSING */
    LOGGER.debug("[STATUS] :: POST-PROCESSING");
    if (state.isAsk()) {
        LOGGER.debug("[POST-PROCESSING] :: setting ASK semantics");
        state.getCurr().getSemantics().setSelect(false);
    } else {
        LOGGER.debug("[POST-PROCESSING] :: setting SELECT semantics");
        state.getCurr().getSemantics().setSelect(true);
    }

    LOGGER.debug("[STATUS] :: current SLTAG\n{}", state.getCurr().toPrettyString());

    return state.getCurr();
}

From source file:net.ceos.project.poi.annotated.core.CGen.java

/**
 * Appends the content of one line, stored in the Map, to the file.
 *
 * @param fW
 *            the file writer to append to
 * @param values
 *            the Map holding the data to write to the file
 * @param separator
 *            the delimiter to place between values
 * @throws IOException
 */
private void addLine(final FileWriter fW, final Map<Integer, String> values, final String separator)
        throws IOException {
    /* append all values at the Map to the file */
    fW.append(values.values().stream().collect(Collectors.joining(separator)));
    /* add end of line */
    fW.append(Constants.END_OF_LINE);
}
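
Side note: because values.values() is already a Collection of strings, the stream is optional here; String.join(separator, values.values()) produces the same result. The stream form pays off once elements must be mapped or filtered before joining.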

From source file:io.gravitee.gateway.http.core.invoker.DefaultHttpInvoker.java

@Override
public ClientRequest invoke(ExecutionContext executionContext, Request serverRequest,
        Handler<ClientResponse> result) {
    // Get target if overridden by a policy
    String targetUri = (String) executionContext.getAttribute(ExecutionContext.ATTR_REQUEST_ENDPOINT);
    Endpoint<HttpClient> endpoint;

    // If not defined, use the one provided by the underlying load-balancer
    if (targetUri == null) {
        String endpointName = nextEndpoint(executionContext);
        endpoint = endpointManager.get(endpointName);

        targetUri = (endpoint != null) ? rewriteURI(serverRequest, endpoint.target()) : null;

        // Set the final target URI invoked
        executionContext.setAttribute(ExecutionContext.ATTR_REQUEST_ENDPOINT, targetUri);
    } else {
        // Select a matching endpoint according to the URL
        // If none, select the first (non-backup) from the endpoint list.
        String finalTargetUri = targetUri;

        Optional<String> endpointName = endpointManager.targetByEndpoint().entrySet().stream()
                .filter(endpointEntry -> finalTargetUri.startsWith(endpointEntry.getValue()))
                .map(Map.Entry::getValue).findFirst();

        endpoint = endpointManager.getOrDefault(endpointName.orElse(null));
    }

    // No endpoint has been selected by load-balancer strategy nor overridden value
    if (targetUri == null) {
        ServiceUnavailableResponse clientResponse = new ServiceUnavailableResponse();
        result.handle(clientResponse);
        clientResponse.endHandler().handle(null);
        return null;
    }

    // Remove duplicate slash
    targetUri = DUPLICATE_SLASH_REMOVER.matcher(targetUri).replaceAll("/");

    URI requestUri = encodeQueryParameters(serverRequest, targetUri);
    String uri = requestUri.toString();

    // Add the endpoint reference in metrics to know which endpoint has been invoked while serving the request
    serverRequest.metrics().setEndpoint(uri);

    final boolean enableHttpDump = api.getProxy().isDumpRequest();
    final HttpMethod httpMethod = extractHttpMethod(executionContext, serverRequest);

    ClientRequest clientRequest = invoke0(endpoint.connector(), httpMethod, requestUri, serverRequest,
            executionContext, (enableHttpDump) ? loggableClientResponse(result, serverRequest) : result);

    if (enableHttpDump) {
        HttpDump.logger.info("{}/{} >> Rewriting: {} -> {}", serverRequest.id(), serverRequest.transactionId(),
                serverRequest.uri(), uri);
        HttpDump.logger.info("{}/{} >> {} {}", serverRequest.id(), serverRequest.transactionId(), httpMethod,
                uri);

        serverRequest.headers()
                .forEach((headerName, headerValues) -> HttpDump.logger.info("{}/{} >> {}: {}",
                        serverRequest.id(), serverRequest.transactionId(), headerName,
                        headerValues.stream().collect(Collectors.joining(","))));

        clientRequest = new LoggableClientRequest(clientRequest, serverRequest);
    }

    return clientRequest;
}

From source file:com.synopsys.integration.blackduck.service.model.PolicyStatusDescription.java

private void getPolicySeverityMessage(final StringBuilder stringBuilder) {
    stringBuilder.append("Policy Severity counts: ");
    // let's loop over the actual enum values for a consistently ordered output
    final String policySeverityItems = Arrays.stream(PolicySeverityType.values())
            .filter(policySeverityCount::containsKey)
            .map(policySeverityType -> fixMatchPlural("%d %s a severity level of %s",
                    policySeverityCount.get(policySeverityType).value, policySeverityType))
            .collect(Collectors.joining(", "));
    stringBuilder.append(policySeverityItems);
}

From source file:de.blizzy.rust.lootconfig.LootConfigDump.java

private String formatItems(Category category) {
    return category.Items.stream().sorted((itemSpawn1, itemSpawn2) -> Collator.getInstance()
            .compare(itemSpawn1.item.Shortname, itemSpawn2.item.Shortname)).map(itemSpawn -> {
                if (itemSpawn.Amount > 1) {
                    return String.format("%s x%d", itemSpawn.item.Shortname, itemSpawn.Amount);
                } else {
                    return itemSpawn.item.Shortname;
                }
            }).collect(Collectors.joining(", "));
}

From source file:com.nextdoor.bender.ipc.s3.S3Transport.java

@Override
public void sendBatch(TransportBuffer buffer, LinkedHashMap<String, String> partitions, Context context)
        throws TransportException {
    S3TransportBuffer buf = (S3TransportBuffer) buffer;

    /*
     * Create s3 key (filepath + filename)
     */
    LinkedHashMap<String, String> parts = new LinkedHashMap<String, String>(partitions);

    String filename = parts.remove(FILENAME_KEY);

    if (filename == null) {
        filename = context.getAwsRequestId();
    }

    String key = parts.entrySet().stream().map(s -> s.getKey() + "=" + s.getValue())
            .collect(Collectors.joining("/"));

    key = (key.equals("") ? filename : key + '/' + filename);

    if (this.basePath.endsWith("/")) {
        key = this.basePath + key;
    } else if (!this.basePath.equals("")) {
        key = this.basePath + '/' + key;
    }

    // TODO: make this dynamic
    if (key.endsWith(".gz")) {
        key = key.substring(0, key.length() - 3);
    }

    /*
     * Add or strip out compression format extension
     *
     * TODO: get this based on the compression codec
     */
    if (this.compress || buf.isCompressed()) {
        key += ".bz2";
    }

    ByteArrayOutputStream os = buf.getInternalBuffer();

    /*
     * Compress stream if needed. Don't compress a compressed stream.
     */
    ByteArrayOutputStream payload;
    if (this.compress && !buf.isCompressed()) {
        payload = compress(os);
    } else {
        payload = os;
    }

    /*
     * For memory efficiency convert the output stream into an InputStream. This is done using the
     * easystream library but under the hood it uses piped streams to facilitate this process. This
     * avoids copying the entire contents of the OutputStream to populate the InputStream. Note that
     * this process creates another thread to consume from the InputStream.
     */
    final String s3Key = key;

    /*
     * Write to OutputStream
     */
    final InputStreamFromOutputStream<String> isos = new InputStreamFromOutputStream<String>() {
        public String produce(final OutputStream dataSink) throws Exception {
            /*
             * Note this is executed in a different thread
             */
            payload.writeTo(dataSink);
            return null;
        }
    };

    /*
     * Consume InputStream
     */
    try {
        sendStream(isos, s3Key, payload.size());
    } finally {
        try {
            isos.close();
        } catch (IOException e) {
            throw new TransportException(e);
        } finally {
            buf.close();
        }
    }
}
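
Note how the first joining call composes the S3 key: each partition entry is rendered as key=value and the entries are joined with "/", producing Hive-style paths such as year=2024/month=01/<filename> (the partition values here are illustrative).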

From source file:io.fabric8.vertx.maven.plugin.mojos.AbstractRunMojo.java

@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    if (skip) {
        getLog().info("vertx:run skipped by configuration");
        return;
    }

    compileIfNeeded();

    List<String> argsList = new ArrayList<>();

    scanAndLoadConfigs();

    boolean isVertxLauncher = isVertxLauncher(launcher);

    getLog().info("Launching Vert.x Application");

    if (isVertxLauncher) {
        addVertxArgs(argsList);
    } else if (redeploy) {
        getLog().info("Vert.x application redeploy enabled");
        argsList.add(0, IO_VERTX_CORE_LAUNCHER);
        argsList.add(1, "run");
        StringBuilder redeployArg = new StringBuilder();
        redeployArg.append(VERTX_ARG_REDEPLOY); //fix for redeploy to work
        computeOutputDirsWildcard(redeployArg);
        argsList.add(redeployArg.toString());
        addRedeployExtraArgs(argsList);
        argsList.add(VERTX_ARG_LAUNCHER_CLASS);
        argsList.add(launcher);

        if (jvmArgs != null && !jvmArgs.isEmpty()) {
            String javaOpts = jvmArgs.stream().collect(Collectors.joining(" "));
            String argJavaOpts = VERTX_ARG_JAVA_OPT + "=" + javaOpts;
            argsList.add(argJavaOpts);
        }
    } else {
        argsList.add(launcher);
    }
    addRunExtraArgs(argsList);
    run(argsList);
}

From source file:com.stratio.qa.utils.GosecSSOUtils.java

private String getStringFromIS(InputStream stream) {
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) {
        return reader.lines().collect(Collectors.joining("\n"));
    } catch (IOException e) {
        e.printStackTrace();
        return "";
    }

}
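
Note that this common read-a-stream-into-a-string idiom normalizes all line separators to \n and drops any trailing newline the stream may have ended with; that is harmless for reading tokens or configuration, but it matters when content must round-trip verbatim.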

From source file:net.sf.jabref.sql.exporter.DatabaseExporter.java

/**
 * Generates the DML required to populate the entries table with JabRef data and executes it against the given
 * connection.
 *
 * @param database_id ID of the JabRef database related to the entries to be exported. This information can be
 *                    gathered using getDatabaseIDByPath(metaData, connection)
 * @param entries     The BibtexEntries to export
 * @param connection  The Connection to which the DML statements are submitted
 */
private void populateEntriesTable(final int database_id, List<BibEntry> entries, Connection connection)
        throws SQLException {
    for (BibEntry entry : entries) {
        try (PreparedStatement statement = connection
                .prepareStatement("INSERT INTO entries (jabref_eid, entry_types_id, cite_key, "
                        + SQLUtil.getFieldStr() + ", database_id) "
                        + "VALUES (?, (SELECT entry_types_id FROM entry_types WHERE label= ? ), ?, "
                        + SQLUtil.getAllFields().stream().map(s -> "?").collect(Collectors.joining(", "))
                        + ", ?);")) {
            statement.setString(1, entry.getId());
            statement.setString(2, entry.getType());
            statement.setString(3, entry.getCiteKey());
            int value = 4;
            for (String field : SQLUtil.getAllFields()) {
                statement.setString(value, entry.getField(field));
                value++;
            }
            statement.setInt(value, database_id);

            statement.execute();
        }
    }
}
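
The joining call here builds the JDBC placeholder list: every field name is mapped to "?" and the results are joined with ", ", yielding one placeholder per exported column. The same string could also be produced without consulting the field names, e.g. String.join(", ", Collections.nCopies(SQLUtil.getAllFields().size(), "?")).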

From source file:com.embedler.moon.graphql.boot.sample.schema.TodoSchema.java

@GraphQLMutation
public @GraphQLOut("filename") String uploadFile(GraphQLContext graphQLContext) {
    return graphQLContext.getFiles().orElse(new HashMap<>()).values().stream().flatMap(Collection::stream)
            .map(FileItem::getName).collect(Collectors.joining(", "));
}