Example usage for java.io PipedOutputStream PipedOutputStream

Introduction

On this page you can find example usages for the java.io PipedOutputStream() no-argument constructor.

Prototype

public PipedOutputStream() 

Document

Creates a piped output stream that is not yet connected to a piped input stream.
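
As a quick orientation before the examples, here is a minimal, self-contained sketch of the pattern they all share: construct the unconnected stream, pair it with a PipedInputStream, and keep the writer on a separate thread from the reader. All class and variable names here are illustrative.

import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.nio.charset.StandardCharsets;

public class PipeDemo {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Both ends start unconnected; connect() pairs them explicitly.
        // Passing one end to the other's constructor is equivalent.
        PipedOutputStream out = new PipedOutputStream();
        PipedInputStream in = new PipedInputStream();
        out.connect(in);

        // Write from a second thread: the pipe's internal buffer is small
        // and fixed, so a single thread that writes then reads can deadlock.
        Thread producer = new Thread(() -> {
            try {
                out.write("hello through the pipe".getBytes(StandardCharsets.UTF_8));
                out.close(); // closing signals end-of-stream to the reader
            } catch (IOException e) {
                e.printStackTrace();
            }
        });
        producer.start();

        // Read until the writer closes its end.
        StringBuilder received = new StringBuilder();
        int b;
        while ((b = in.read()) != -1) {
            received.append((char) b);
        }
        in.close();
        producer.join();
        System.out.println(received);
    }
}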

Usage

From source file:org.talend.dataprep.transformation.service.TransformationService.java

/**
 * Compute the given aggregation.
 *
 * @param rawParams the aggregation parameters, passed as the request body.
 */
// @formatter:off
@RequestMapping(value = "/aggregate", method = POST, produces = APPLICATION_JSON_VALUE, consumes = APPLICATION_JSON_VALUE)
@ApiOperation(value = "Compute the aggregation according to the request body rawParams", consumes = APPLICATION_JSON_VALUE)
@VolumeMetered
public AggregationResult aggregate(
        @ApiParam(value = "The aggregation rawParams in json") @RequestBody final String rawParams) {
    // @formatter:on

    // parse the aggregation parameters
    final AggregationParameters parameters;
    try {
        parameters = mapper.readerFor(AggregationParameters.class).readValue(rawParams);
        LOG.debug("Aggregation requested {}", parameters);
    } catch (IOException e) {
        throw new TDPException(CommonErrorCodes.BAD_AGGREGATION_PARAMETERS, e);
    }

    InputStream contentToAggregate;

    // get the content of the preparation (internal call with piped streams)
    if (StringUtils.isNotBlank(parameters.getPreparationId())) {
        try {
            PipedOutputStream temp = new PipedOutputStream();
            contentToAggregate = new PipedInputStream(temp);

            // because of piped streams, processing must be asynchronous
            Runnable r = () -> {
                try {
                    final ExportParameters exportParameters = new ExportParameters();
                    exportParameters.setPreparationId(parameters.getPreparationId());
                    exportParameters.setDatasetId(parameters.getDatasetId());
                    if (parameters.getFilter() != null) {
                        exportParameters.setFilter(mapper.readTree(parameters.getFilter()));
                    }
                    exportParameters.setExportType(JSON);
                    exportParameters.setStepId(parameters.getStepId());

                    final StreamingResponseBody body = executeSampleExportStrategy(exportParameters);
                    body.writeTo(temp);
                } catch (IOException e) {
                    throw new TDPException(CommonErrorCodes.UNABLE_TO_AGGREGATE, e);
                }
            };
            executor.execute(r);
        } catch (IOException e) {
            throw new TDPException(CommonErrorCodes.UNABLE_TO_AGGREGATE, e);
        }
    } else {
        final DataSetGet dataSetGet = context.getBean(DataSetGet.class, parameters.getDatasetId(), false, true);
        contentToAggregate = dataSetGet.execute();
    }

    // apply the aggregation
    try (JsonParser parser = mapper.getFactory().createParser(contentToAggregate)) {
        final DataSet dataSet = mapper.readerFor(DataSet.class).readValue(parser);
        return aggregationService.aggregate(parameters, dataSet);
    } catch (IOException e) {
        throw new TDPException(CommonErrorCodes.UNABLE_TO_PARSE_JSON, e);
    } finally {
        // don't forget to release the connection
        if (contentToAggregate != null) {
            try {
                contentToAggregate.close();
            } catch (IOException e) {
                LOG.warn("Could not close dataset input stream while aggregating", e);
            }
        }
    }
}
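
Note the shape of this example: the PipedOutputStream is created first, passed to the PipedInputStream constructor (which connects the two ends), and then written to from a Runnable handed to an executor. The asynchronous producer is not optional: a piped pair shares a small fixed internal buffer (1024 bytes by default), so writing and reading on the same thread would block as soon as the buffer fills.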

From source file:org.whitesource.agent.utils.ZipUtils.java

private static void fillExportStreamDecompress(String text, StringBuilder stringBuilder) {
    try {
        try (PipedInputStream pipedInputStream = new PipedInputStream()) {
            try (PipedOutputStream pipedOutputStream = new PipedOutputStream()) {
                pipedInputStream.connect(pipedOutputStream);

                Runnable producer = new Runnable() {
                    @Override
                    public void run() {
                        produceDecompressData(text, pipedOutputStream);
                    }
                };
                Runnable consumer = new Runnable() {
                    @Override
                    public void run() {
                        consumeDecompressData(pipedInputStream, stringBuilder);
                    }
                };

                transferData(producer, consumer);
            }
        }
    } catch (IOException e) {
        // logger.error("Failed to decompress : ", e);
    }
}
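
Here both ends are constructed unconnected and paired with an explicit pipedInputStream.connect(pipedOutputStream) call, which is equivalent to passing one end to the other's constructor. Both streams are scoped with try-with-resources; note that the catch block swallows the IOException, with the logging call left commented out in the original source.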

From source file:freenet.client.ArchiveManager.java

/**
 * Extract data to cache. Call synchronized on ctx.
 * @param key The key the data was fetched from.
 * @param archiveType The archive type. Must be Metadata.ARCHIVE_ZIP | Metadata.ARCHIVE_TAR.
 * @param data The actual data fetched.
 * @param archiveContext The context for the whole fetch process.
 * @param ctx The ArchiveStoreContext for this key.
 * @param element A particular element that the caller is especially interested in, or null.
 * @param callback A callback to be called if we find that element, or if we don't.
 * @throws ArchiveFailureException If we could not extract the data, or it was too big, etc.
 * @throws ArchiveRestartException If the request needs to be restarted because the archive
 * changed.
 */
public void extractToCache(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, final Bucket data,
        ArchiveContext archiveContext, ArchiveStoreContext ctx, String element, ArchiveExtractCallback callback,
        ClientContext context) throws ArchiveFailureException, ArchiveRestartException {
    logMINOR = Logger.shouldLog(LogLevel.MINOR, this);

    MutableBoolean gotElement = element != null ? new MutableBoolean() : null;

    if (logMINOR)
        Logger.minor(this, "Extracting " + key);
    ctx.removeAllCachedItems(this); // flush cache anyway
    final long expectedSize = ctx.getLastSize();
    final long archiveSize = data.size();
    /** Set if we need to throw a RestartedException rather than returning success,
     * after we have unpacked everything.
     */
    boolean throwAtExit = false;
    if ((expectedSize != -1) && (archiveSize != expectedSize)) {
        throwAtExit = true;
        ctx.setLastSize(archiveSize);
    }
    byte[] expectedHash = ctx.getLastHash();
    if (expectedHash != null) {
        byte[] realHash;
        try {
            realHash = BucketTools.hash(data);
        } catch (IOException e) {
            throw new ArchiveFailureException("Error reading archive data: " + e, e);
        }
        if (!Arrays.equals(realHash, expectedHash))
            throwAtExit = true;
        ctx.setLastHash(realHash);
    }

    if (archiveSize > archiveContext.maxArchiveSize)
        throw new ArchiveFailureException(
                "Archive too big (" + archiveSize + " > " + archiveContext.maxArchiveSize + ")!");
    else if (archiveSize <= 0)
        throw new ArchiveFailureException("Archive too small! (" + archiveSize + ')');
    else if (logMINOR)
        Logger.minor(this, "Container size (possibly compressed): " + archiveSize + " for " + data);

    InputStream is = null;
    try {
        final ExceptionWrapper wrapper;
        if ((ctype == null) || (ARCHIVE_TYPE.ZIP == archiveType)) {
            if (logMINOR)
                Logger.minor(this, "No compression");
            is = data.getInputStream();
            wrapper = null;
        } else if (ctype == COMPRESSOR_TYPE.BZIP2) {
            if (logMINOR)
                Logger.minor(this, "dealing with BZIP2");
            is = new BZip2CompressorInputStream(data.getInputStream());
            wrapper = null;
        } else if (ctype == COMPRESSOR_TYPE.GZIP) {
            if (logMINOR)
                Logger.minor(this, "dealing with GZIP");
            is = new GZIPInputStream(data.getInputStream());
            wrapper = null;
        } else if (ctype == COMPRESSOR_TYPE.LZMA_NEW) {
            // LZMA internally uses pipe streams, so we may as well do it here.
            // In fact we need to for LZMA_NEW, because of the properties bytes.
            PipedInputStream pis = new PipedInputStream();
            PipedOutputStream pos = new PipedOutputStream();
            pis.connect(pos);
            final OutputStream os = new BufferedOutputStream(pos);
            wrapper = new ExceptionWrapper();
            context.mainExecutor.execute(new Runnable() {

                @Override
                public void run() {
                    InputStream is = null;
                    try {
                        Compressor.COMPRESSOR_TYPE.LZMA_NEW.decompress(is = data.getInputStream(), os,
                                data.size(), expectedSize);
                    } catch (CompressionOutputSizeException e) {
                        Logger.error(this, "Failed to decompress archive: " + e, e);
                        wrapper.set(e);
                    } catch (IOException e) {
                        Logger.error(this, "Failed to decompress archive: " + e, e);
                        wrapper.set(e);
                    } finally {
                        try {
                            os.close();
                        } catch (IOException e) {
                            Logger.error(this, "Failed to close PipedOutputStream: " + e, e);
                        }
                        Closer.close(is);
                    }
                }

            });
            is = pis;
        } else if (ctype == COMPRESSOR_TYPE.LZMA) {
            if (logMINOR)
                Logger.minor(this, "dealing with LZMA");
            is = new LzmaInputStream(data.getInputStream());
            wrapper = null;
        } else {
            wrapper = null;
        }

        if (ARCHIVE_TYPE.ZIP == archiveType)
            handleZIPArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
        else if (ARCHIVE_TYPE.TAR == archiveType)
            handleTARArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
        else
            throw new ArchiveFailureException("Unknown or unsupported archive algorithm " + archiveType);
        if (wrapper != null) {
            Exception e = wrapper.get();
            if (e != null)
                throw new ArchiveFailureException("An exception occured decompressing: " + e.getMessage(), e);
        }
    } catch (IOException ioe) {
        throw new ArchiveFailureException("An IOE occured: " + ioe.getMessage(), ioe);
    } finally {
        Closer.close(is);
    }
}
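
In this example the unconnected PipedOutputStream is paired via pis.connect(pos) and wrapped in a BufferedOutputStream that the LZMA decompressor writes to from a task on context.mainExecutor. Since an exception thrown on that worker thread would otherwise be lost, the code relays it through an ExceptionWrapper, which the calling thread inspects after the archive has been processed.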

From source file:lucee.commons.io.res.type.datasource.DatasourceResourceProvider.java

public synchronized OutputStream getOutputStream(ConnectionData data, int fullPathHash, int pathHash,
        String path, String name, boolean append) throws IOException {

    Attr attr = getAttr(data, fullPathHash, path, name);
    if (attr.getId() == 0) {
        create(data, fullPathHash, pathHash, path, name, Attr.TYPE_FILE);
        attr = getAttr(data, fullPathHash, path, name);
    }

    PipedInputStream pis = new PipedInputStream();
    PipedOutputStream pos = new PipedOutputStream();
    pis.connect(pos);
    DatasourceConnection dc = null;
    //Connection c=null;
    try {
        dc = getDatasourceConnection(data);
        //Connection c = dc.getConnection();

        DataWriter writer = new DataWriter(getCore(data), dc, data.getPrefix(), attr, pis, this, append);
        writer.start();

        return new DatasourceResourceOutputStream(writer, pos);
        //core.getOutputStream(dc, name, attr, pis);
    } catch (PageException e) {
        throw new PageRuntimeException(e);
    } finally {
        removeFromCache(data, path, name);
        //manager.releaseConnection(CONNECTION_ID,dc);
    }
}
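
The piped pair here bridges the caller and a background DataWriter thread: the writer drains the PipedInputStream while the method returns an output stream wrapping the PipedOutputStream, so bytes written by the caller are persisted to the datasource asynchronously.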

From source file:org.apache.tajo.cli.tsql.TestTajoCli.java

@Test
public void testRunWhenError() throws Exception {
    Thread t = new Thread() {
        public void run() {
            try {
                PipedOutputStream po = new PipedOutputStream();
                InputStream is = new PipedInputStream(po);
                ByteArrayOutputStream out = new ByteArrayOutputStream();

                TajoConf tajoConf = new TajoConf();
                setVar(tajoCli, SessionVars.CLI_FORMATTER_CLASS, TajoCliOutputTestFormatter.class.getName());
                Properties connParams = new Properties();
                connParams.setProperty(ClientParameters.RETRY, "3");
                TajoCli tc = new TajoCli(tajoConf, new String[] {}, connParams, is, out, err);

                tc.executeMetaCommand("\\set ON_ERROR_STOP false");
                assertSessionVar(tc, SessionVars.ON_ERROR_STOP.keyname(), "false");

                po.write("asdf;\nqwe;\nzxcv;\n".getBytes());

                tc.runShell();
            } catch (Exception e) {
                throw new RuntimeException("Cannot run thread in testRunWhenError", e);
            }
        }
    };

    t.start();
    Thread.sleep(1000);
    if (!t.isAlive()) {
        fail("TSQL should be alive");
    } else {
        t.interrupt();
        t.join();
    }
}
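
The test scripts the CLI's standard input through the pipe: the PipedInputStream is passed to TajoCli as its input stream, and the test thread feeds it statements by writing bytes to the connected PipedOutputStream.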

From source file:org.mobicents.slee.resource.tftp.TFTPTransfer.java

protected InputStream getInputStream() throws IOException {
    if (!isWrite())
        throw new IOException("No write request pending");
    if (sbbIs == null && os_ == null) {
        os_ = new PipedOutputStream();
        sbbIs = new PipedInputStream((PipedOutputStream) os_);
        resume();
    }
    return sbbIs;
}

From source file:org.mobicents.slee.resource.tftp.TFTPTransfer.java

protected OutputStream getOutputStream() throws IOException {
    if (!isRead())
        throw new IOException("No read request pending");
    if (sbbOs == null && is_ == null) {
        sbbOs = new PipedOutputStream();
        is_ = new PipedInputStream((PipedOutputStream) sbbOs);
        resume();
    }
    return sbbOs;
}
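
Both getters above create the pipe pair lazily on first access, one pair per transfer direction, then call resume() to continue the transfer; subsequent calls return the already-connected stream.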

From source file:org.gradle.integtests.fixtures.executer.AbstractGradleExecuter.java

@Override
public GradleExecuter withStdinPipe() {
    return withStdinPipe(new PipedOutputStream());
}
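
A one-line use of the no-argument constructor: the freshly created, unconnected PipedOutputStream is passed to an overload that, judging by its name, wires it up as the pipe feeding the build's standard input.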

From source file:io.fabric8.docker.client.impl.BuildImage.java

@Override
public FromPathInterface<OutputHandle> redirectingOutput() {
    return new BuildImage(client, config, repositoryName, dockerFile, noCache, buildArgs, pulling,
            alwaysRemoveIntermediate, removeIntermediateOnSuccess, supressingVerboseOutput, cpuPeriodMicros,
            cpuQuotaMicros, cpuShares, cpus, memorySize, swapSize, new PipedOutputStream(), listener);
}

From source file:org.apache.nifi.remote.util.SiteToSiteRestApiClient.java

public void openConnectionForSend(final String transactionUrl, final Peer peer) throws IOException {

    final CommunicationsSession commSession = peer.getCommunicationsSession();
    final String flowFilesPath = transactionUrl + "/flow-files";
    final HttpPost post = createPost(flowFilesPath);
    // Set uri so that it'll be used as transit uri.
    ((HttpCommunicationsSession) peer.getCommunicationsSession()).setDataTransferUrl(post.getURI().toString());

    post.setHeader("Content-Type", "application/octet-stream");
    post.setHeader("Accept", "text/plain");
    post.setHeader(HttpHeaders.PROTOCOL_VERSION,
            String.valueOf(transportProtocolVersionNegotiator.getVersion()));

    setHandshakeProperties(post);

    final CountDownLatch initConnectionLatch = new CountDownLatch(1);

    final URI requestUri = post.getURI();
    final PipedOutputStream outputStream = new PipedOutputStream();
    final PipedInputStream inputStream = new PipedInputStream(outputStream,
            DATA_PACKET_CHANNEL_READ_BUFFER_SIZE);
    final ReadableByteChannel dataPacketChannel = Channels.newChannel(inputStream);
    final HttpAsyncRequestProducer asyncRequestProducer = new HttpAsyncRequestProducer() {

        private final ByteBuffer buffer = ByteBuffer.allocate(DATA_PACKET_CHANNEL_READ_BUFFER_SIZE);

        private int totalRead = 0;
        private int totalProduced = 0;

        private boolean requestHasBeenReset = false;

        @Override
        public HttpHost getTarget() {
            return URIUtils.extractHost(requestUri);
        }

        @Override
        public HttpRequest generateRequest() throws IOException, HttpException {

            // Pass the output stream so that Site-to-Site client thread can send
            // data packet through this connection.
            logger.debug("sending data to {} has started...", flowFilesPath);
            ((HttpOutput) commSession.getOutput()).setOutputStream(outputStream);
            initConnectionLatch.countDown();

            final BasicHttpEntity entity = new BasicHttpEntity();
            entity.setChunked(true);
            entity.setContentType("application/octet-stream");
            post.setEntity(entity);
            return post;
        }

        private final AtomicBoolean bufferHasRemainingData = new AtomicBoolean(false);

        /**
         * If the proxy server requires authentication, the same POST request has to be sent again.
         * The first request will result in a 407; the next one will then be sent with auth headers and the actual data.
         * This method produces content only when it needs to be sent, to avoid producing the flow-file contents twice.
         * Whether we need to wait for proxy authentication is determined heuristically by the previous POST request, which creates the transaction.
         * See {@link SiteToSiteRestApiClient#initiateTransactionForSend(HttpPost)} for further detail.
         */
        @Override
        public void produceContent(final ContentEncoder encoder, final IOControl ioControl) throws IOException {

            if (shouldCheckProxyAuth() && proxyAuthRequiresResend.get() && !requestHasBeenReset) {
                logger.debug("Need authentication with proxy server. Postpone producing content.");
                encoder.complete();
                return;
            }

            if (bufferHasRemainingData.get()) {
                // If the buffer had data remaining from last time, send it first.
                writeBuffer(encoder);
                if (bufferHasRemainingData.get()) {
                    return;
                }
            }

            int read;
            // This read() blocks until data becomes available,
            // or corresponding outputStream is closed.
            if ((read = dataPacketChannel.read(buffer)) > -1) {

                logger.trace("Read {} bytes from dataPacketChannel. {}", read, flowFilesPath);
                totalRead += read;

                buffer.flip();
                writeBuffer(encoder);

            } else {

                final long totalWritten = commSession.getOutput().getBytesWritten();
                logger.debug(
                        "sending data to {} has reached to its end. produced {} bytes by reading {} bytes from channel. {} bytes written in this transaction.",
                        flowFilesPath, totalProduced, totalRead, totalWritten);

                if (totalRead != totalWritten || totalProduced != totalWritten) {
                    final String msg = "Sending data to %s has reached to its end, but produced : read : wrote byte sizes (%d : %d : %d) were not equal. Something went wrong.";
                    throw new RuntimeException(
                            String.format(msg, flowFilesPath, totalProduced, totalRead, totalWritten));
                }
                transferDataLatch.countDown();
                encoder.complete();
                dataPacketChannel.close();
            }

        }

        private void writeBuffer(ContentEncoder encoder) throws IOException {
            while (buffer.hasRemaining()) {
                final int written = encoder.write(buffer);
                logger.trace("written {} bytes to encoder.", written);
                if (written == 0) {
                    logger.trace("Buffer still has remaining. {}", buffer);
                    bufferHasRemainingData.set(true);
                    return;
                }
                totalProduced += written;
            }
            bufferHasRemainingData.set(false);
            buffer.clear();
        }

        @Override
        public void requestCompleted(final HttpContext context) {
            logger.debug("Sending data to {} completed.", flowFilesPath);
            debugProxyAuthState(context);
        }

        @Override
        public void failed(final Exception ex) {
            final String msg = String.format("Failed to send data to %s due to %s", flowFilesPath,
                    ex.toString());
            logger.error(msg, ex);
            eventReporter.reportEvent(Severity.WARNING, EVENT_CATEGORY, msg);
        }

        @Override
        public boolean isRepeatable() {
            // In order to pass authentication, request has to be repeatable.
            return true;
        }

        @Override
        public void resetRequest() throws IOException {
            logger.debug("Sending data request to {} has been reset...", flowFilesPath);
            requestHasBeenReset = true;
        }

        @Override
        public void close() throws IOException {
            logger.debug("Closing sending data request to {}", flowFilesPath);
            closeSilently(outputStream);
            closeSilently(dataPacketChannel);
            stopExtendingTtl();
        }
    };

    postResult = getHttpAsyncClient().execute(asyncRequestProducer, new BasicAsyncResponseConsumer(), null);

    try {
        // Wait until the POST request has actually started so that we can write to its output stream.
        if (!initConnectionLatch.await(connectTimeoutMillis, TimeUnit.MILLISECONDS)) {
            throw new IOException("Awaiting initConnectionLatch has been timeout.");
        }

        // Started.
        transferDataLatch = new CountDownLatch(1);
        startExtendingTtl(transactionUrl, dataPacketChannel, null);

    } catch (final InterruptedException e) {
        throw new IOException("Awaiting initConnectionLatch has been interrupted.", e);
    }

}
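
This example uses the PipedInputStream(PipedOutputStream, int) constructor, which both connects the pair and sets the pipe's internal buffer to DATA_PACKET_CHANNEL_READ_BUFFER_SIZE. One thread (the Site-to-Site client) writes to the PipedOutputStream while the asynchronous HTTP request producer drains the other end through a ReadableByteChannel; the CountDownLatch keeps the caller from writing before the request has actually started.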