Example usage for java.io PipedOutputStream PipedOutputStream

Introduction

This page shows example usage of the java.io PipedOutputStream constructor PipedOutputStream(PipedInputStream snk), collected from several open-source projects.

Prototype

public PipedOutputStream(PipedInputStream snk) throws IOException 

Document

Creates a piped output stream connected to the specified piped input stream.
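
Before the per-project examples, here is a minimal, self-contained sketch of the constructor in isolation (the class name and message are illustrative, not taken from any source below): one thread writes into the PipedOutputStream while the calling thread reads from the connected PipedInputStream.

import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;

public class PipeSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        PipedInputStream in = new PipedInputStream();
        // The constructor under discussion: connect the sink at construction time.
        final PipedOutputStream out = new PipedOutputStream(in);

        Thread writer = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    out.write("hello pipe".getBytes());
                    out.close(); // close so the reader sees end-of-stream
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
        writer.start();

        int b;
        while ((b = in.read()) != -1) {
            System.out.print((char) b);
        }
        writer.join();
        in.close();
    }
}

Every example below follows the same shape: construct the pair, hand the output side to a worker thread, and consume or return the input side on the calling thread.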

Usage

From source file:org.taverna.server.master.worker.RemoteRunDelegate.java

@Override
public ZipStream getContentsAsZip() throws FilesystemAccessException {
    ZipStream zs = new ZipStream();

    final ZipOutputStream zos;
    try {
        zos = new ZipOutputStream(new PipedOutputStream(zs));
    } catch (IOException e) {
        throw new FilesystemAccessException("problem building zip stream", e);
    }
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                zipDirectory(rd, null, zos);
            } catch (IOException e) {
                log.warn("problem when zipping directory", e);
            } finally {
                closeQuietly(zos);
            }
        }
    });
    t.setDaemon(true);
    t.start();
    return zs;
}
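
Two details make this pattern robust: the worker is a daemon thread, so a consumer that abandons the returned ZipStream cannot keep the JVM alive on a blocked pipe write, and closeQuietly(zos) in the finally block ensures the reader eventually sees end-of-stream even if zipping fails partway through.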

From source file:com.heliosapm.tsdblite.json.JSON.java

/**
 * Serializes the passed object and pipes the result back through an InputStream.
 * Spawns a thread to run the pipe out, so the calling thread only needs to read the returned input stream.
 * If the serialization fails, the worker thread closes the input stream to signal the failure.
 * @param obj The object to serialize
 * @return an InputStream to read back the JSON serialized object
 */
public static InputStream serializeLoopBack(final Object obj) {
    if (obj == null)
        throw new IllegalArgumentException("The passed object was null");
    try {
        final PipedInputStream pin = new PipedInputStream(2048);
        final PipedOutputStream pout = new PipedOutputStream(pin);
        final Thread t = new Thread("serializeLoopBackThread") {
            @Override
            public void run() {
                try {
                    serialize(obj, pout);
                } catch (Exception ex) {
                    try {
                        pin.close();
                    } catch (Exception x) {
                        /* No Op */
                    }
                }
            }
        };
        t.setDaemon(true);
        t.start();
        return pin;
    } catch (Exception ex) {
        throw new RuntimeException("Failed to pipe serialized object", ex);
    }
}
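
A hypothetical caller might drain the returned stream like this (the roundTrip helper, its bean argument, and the UTF-8 charset are assumptions for illustration, not part of the source):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

static String roundTrip(Object bean) throws IOException {
    InputStream in = JSON.serializeLoopBack(bean);
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    byte[] chunk = new byte[1024];
    int n;
    while ((n = in.read(chunk)) != -1) {
        buf.write(chunk, 0, n);
    }
    // A pipe closed early by the worker (serialization failure) surfaces here as an IOException.
    return new String(buf.toByteArray(), StandardCharsets.UTF_8);
}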

From source file:ch.iterate.openstack.swift.Client.java

/**
 * @param container          The name of the container
 * @param name               The name of the object
 * @param entity             The request entity (make sure to set the Content-Type)
 * @param metadata           The metadata for the object
 * @param md5sum             The 32 character hex encoded MD5 sum of the data
 * @param objectSize         The total size in bytes of the object to be stored
 * @param segmentSize        Optional size in bytes of the object segments to be stored (forces large object support) default 4G
 * @param dynamicLargeObject Optional setting to use dynamic large objects, False/null will use static large objects if required
 * @param segmentContainer   Optional name of container to store file segments, defaults to storing chunks in the same container as the file will appear
 * @param segmentFolder      Optional name of folder for storing file segments, defaults to ".chunks/"
 * @param leaveSegments      Optional setting to leave segments of large objects in place when the manifest is overwritten/changed
 * @return The ETAG if the save was successful, null otherwise
 * @throws GenericException There was a protocol level error talking to CloudFiles
 */
public String storeObject(Region region, String container, String name, HttpEntity entity,
        Map<String, String> metadata, String md5sum, Long objectSize, Long segmentSize,
        Boolean dynamicLargeObject, String segmentContainer, String segmentFolder, Boolean leaveSegments)
        throws IOException, InterruptedException {
    /*
     * Default values for large object support. We also use the defaults combined with the inputs
     * to determine whether to store as a large object.
     */

    /*
     * The maximum size of a single object (5GiB).
     */
    long singleObjectSizeLimit = (long) (5 * Math.pow(1024, 3));

    /*
     * The default minimum segment size (1MiB).
     */
    long minSegmentSize = 1024L * 1024L;

    /*
     * Set the segment size.
     *
     * Defaults to 4GiB segments, and will not permit smaller than 1MiB segments.
     */
    long actualSegmentSize = (segmentSize == null) ? (long) (4 * Math.pow(1024, 3))
            : Math.max(segmentSize, minSegmentSize);

    /*
     * Determines if we will store using large objects - we may do this for 3 reasons:
     *
     *  - A segmentSize has been specified and the object size is greater than the minimum segment size
     *  - If an objectSize is provided and is larger than the single object size limit of 5GiB
     *  - A segmentSize has been specified, but no objectSize given (we take this as a request for segmentation)
     *
     * The last case may fail if the user does not provide at least as much data as the minimum segment
     * size configured on the server, and will always produce a large object structure (even if only one
     * small segment is required).
     */
    objectSize = (objectSize == null) ? -1 : objectSize;
    boolean useLargeObject = ((segmentSize != null) && (objectSize > actualSegmentSize))
            || (objectSize > singleObjectSizeLimit) || ((segmentSize != null) && (objectSize == -1));

    if (!useLargeObject) {
        return storeObject(region, container, name, entity, metadata, md5sum);
    } else {
        /*
         * We need to upload a large object as defined by the method
         * parameters. For now this is done sequentially, but a parallel
         * version using appropriate random access to the underlying data
         * may be desirable.
         *
         * We make the assumption that the given file size will not be
         * greater than int.MAX_VALUE * segmentSize
         *
         */
        leaveSegments = (leaveSegments == null) ? Boolean.FALSE : leaveSegments;
        dynamicLargeObject = (dynamicLargeObject == null) ? Boolean.FALSE : dynamicLargeObject;
        segmentFolder = (segmentFolder == null) ? ".file-segments" : segmentFolder;
        segmentContainer = (segmentContainer == null) ? container : segmentContainer;

        Map<String, List<StorageObject>> oldSegmentsToRemove = null;

        /*
         * If we have chosen not to leave existing large object segments in place (default)
         * then we need to collect information about any existing file segments so that we can
         * deal with them after we complete the upload of the new manifest.
         *
         * We should only delete existing segments after a successful upload of a new manifest file
         * because this constitutes an object update and the older file should remain available
         * until the new file can be downloaded.
         */
        if (!leaveSegments) {
            ObjectMetadata existingMetadata;
            String manifestDLO = null;
            Boolean manifestSLO = Boolean.FALSE;

            try {
                existingMetadata = getObjectMetaData(region, container, name);

                if (existingMetadata.getMetaData().containsKey(Constants.MANIFEST_HEADER)) {
                    manifestDLO = existingMetadata.getMetaData().get(Constants.MANIFEST_HEADER);
                } else if (existingMetadata.getMetaData().containsKey(Constants.X_STATIC_LARGE_OBJECT)) {
                    JSONParser parser = new JSONParser();
                    String manifestSLOValue = existingMetadata.getMetaData()
                            .get(Constants.X_STATIC_LARGE_OBJECT);
                    manifestSLO = (Boolean) parser.parse(manifestSLOValue);
                }
            } catch (NotFoundException e) {
                /*
                 * Just means no object exists already, so continue
                 */
            } catch (ParseException e) {
                /*
                 * X_STATIC_LARGE_OBJECT header existed but failed to parse.
                 * If a static large object already exists this must be set to "true".
                 * If we got here then the X_STATIC_LARGE_OBJECT header existed, but failed
                 * to parse as a boolean, so fail upload as a precaution.
                 */
                return null;
            }

            if (manifestDLO != null) {
                /*
                 * We have found an existing dynamic large object, so use the prefix to get a list of
                 * existing objects. If we're putting up a new dlo, make sure the segment prefixes are
                 * different, then we can delete anything that's not in the new list if necessary.
                 */
                String oldContainer = manifestDLO.substring(0, manifestDLO.indexOf('/', 1));
                String oldPath = manifestDLO.substring(manifestDLO.indexOf('/', 1), manifestDLO.length());
                oldSegmentsToRemove = new HashMap<String, List<StorageObject>>();
                oldSegmentsToRemove.put(oldContainer, listObjects(region, oldContainer, oldPath));
            } else if (manifestSLO) {
                /*
                 * We have found an existing static large object, so grab the manifest data that
                 * details the existing segments; any segments no longer needed can be deleted later.
                 */

            }
        }

        int segmentNumber = 1;
        long timeStamp = System.currentTimeMillis() / 1000L;
        String segmentBase = String.format("%s/%d/%d", segmentFolder, timeStamp, objectSize);

        /*
         * Create a SubInputStream over the PipedInputStream that is fed by the
         * OutputStream we will pass to the HttpEntity for writing content.
         */
        final PipedInputStream contentInStream = new PipedInputStream(64 * 1024);
        final PipedOutputStream contentOutStream = new PipedOutputStream(contentInStream);
        SubInputStream segmentStream = new SubInputStream(contentInStream, actualSegmentSize, false);

        /*
         * Fork the call to entity.writeTo() so that we can capture any exceptions it raises
         */
        final HttpEntity e = entity;

        final Callable<Boolean> writer = new Callable<Boolean>() {
            public Boolean call() throws Exception {
                e.writeTo(contentOutStream);
                return Boolean.TRUE;
            }
        };

        ExecutorService writeExecutor = Executors.newSingleThreadExecutor();
        final Future<Boolean> future = writeExecutor.submit(writer);
        /*
         * Check the future for exceptions after we've finished uploading segments
         */

        Map<String, List<StorageObject>> newSegmentsAdded = new HashMap<String, List<StorageObject>>();
        List<StorageObject> newSegments = new LinkedList<StorageObject>();
        JSONArray manifestSLO = new JSONArray();
        boolean finished = false;

        /*
         * Upload each segment of the file by reading sections of the content input stream
         * until the entire underlying stream is complete
         */
        while (!finished) {
            String segmentName = String.format("%s/%08d", segmentBase, segmentNumber);

            String etag;
            boolean error = false;
            try {
                etag = storeObject(region, segmentContainer, segmentStream, "application/octet-stream",
                        segmentName, new HashMap<String, String>());
            } catch (IOException ex) {
                // Log the failure, then propagate it to the caller
                System.out.println("Caught IO Exception: " + ex.getMessage());
                ex.printStackTrace();
                throw ex;
            }
            String segmentPath = segmentContainer + "/" + segmentName;
            long bytesUploaded = segmentStream.getBytesProduced();

            /*
             * Create the appropriate manifest structure if we're making a static large
             * object.
             *
             *   ETAG returned by the simple upload
             *   total size of segment uploaded
             *   path of segment
             */
            if (!dynamicLargeObject) {
                JSONObject segmentJSON = new JSONObject();

                segmentJSON.put("path", segmentPath);
                segmentJSON.put("etag", etag);
                segmentJSON.put("size_bytes", bytesUploaded);
                manifestSLO.add(segmentJSON);

                newSegments.add(new StorageObject(segmentName));
            }

            segmentNumber++;
            if (!finished) {
                finished = segmentStream.endSourceReached();
            }
            newSegmentsAdded.put(segmentContainer, newSegments);
            System.out.println("JSON: " + manifestSLO.toString());
            if (error)
                return "";

            segmentStream.readMoreBytes(actualSegmentSize);
        }

        /*
         * Attempts to retrieve the return value from the write operation
         * Any exceptions raised can then be handled appropriately
         */
        try {
            future.get();
        } catch (InterruptedException ex) {
            /*
             * The write was interrupted... delete the segments?
             */
        } catch (ExecutionException ex) {
            /*
             * This should always be an IOException or a RuntimeException
             * because the call to entity.writeTo() only throws IOException
             */
            Throwable t = ex.getCause();

            if (t instanceof IOException) {
                throw (IOException) t;
            } else {
                throw (RuntimeException) t;
            }
        }

        /*
         * Create an appropriate manifest depending on our DLO/SLO choice
         */
        String manifestEtag = null;
        if (dynamicLargeObject) {
            /*
             * Empty manifest with header detailing the shared prefix of object segments
             */
            long manifestTimeStamp = System.currentTimeMillis() / 1000L;
            metadata.put("X-Object-Manifest", segmentBase);
            metadata.put("x-object-meta-mtime", String.format("%s", manifestTimeStamp));
            manifestEtag = storeObject(region, container, new ByteArrayInputStream(new byte[0]),
                    entity.getContentType().getValue(), name, metadata);
        } else {
            /*
             * Manifest containing json list specifying details of the object segments.
             */
            URIBuilder urlBuild = new URIBuilder(region.getStorageUrl(container, name));
            urlBuild.setParameter("multipart-manifest", "put");
            URI url;
            try {
                url = urlBuild.build();
                String manifestContent = manifestSLO.toString();
                InputStreamEntity manifestEntity = new InputStreamEntity(
                        new ByteArrayInputStream(manifestContent.getBytes()), -1);
                manifestEntity.setChunked(true);
                manifestEntity.setContentType(entity.getContentType());
                HttpPut method = new HttpPut(url);
                method.setEntity(manifestEntity);
                method.setHeader("x-static-large-object", "true");
                Response response = this.execute(method, new DefaultResponseHandler());
                if (response.getStatusCode() == HttpStatus.SC_CREATED) {
                    manifestEtag = response.getResponseHeader(HttpHeaders.ETAG).getValue();
                } else {
                    throw new GenericException(response);
                }
            } catch (URISyntaxException ex) {
                ex.printStackTrace();
            }
        }

        /*
         * Delete stale segments of overwritten large object if requested.
         */
        if (!leaveSegments) {
            /*
             * Before deleting old segments, remove any objects from the delete list
             * that are also part of a new static large object that were updated during the upload.
             */
            if (!(oldSegmentsToRemove == null)) {
                for (String c : oldSegmentsToRemove.keySet()) {
                    List<StorageObject> rmv = oldSegmentsToRemove.get(c);
                    if (newSegmentsAdded.containsKey(c)) {
                        rmv.removeAll(newSegmentsAdded.get(c));
                    }
                    List<String> rmvNames = new LinkedList<String>();
                    for (StorageObject s : rmv) {
                        rmvNames.add(s.getName());
                    }
                    deleteObjects(region, c, rmvNames);
                }
            }
        }

        return manifestEtag;
    }
}
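
A hypothetical invocation that forces a segmented upload by supplying only a segment size (the client and region variables, the container and object names, and the sizes are assumptions for illustration):

Map<String, String> metadata = new HashMap<String, String>();
String etag = client.storeObject(region, "media", "backup.bin", entity, metadata,
        null,                  // md5sum not known in advance
        null,                  // objectSize unknown: segmentation is inferred from segmentSize
        512L * 1024L * 1024L,  // 512 MiB segments
        Boolean.FALSE,         // build a static large object manifest
        null, null,            // default segment container and folder
        Boolean.FALSE);        // delete stale segments of any overwritten object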

From source file:com.kdmanalytics.toif.assimilator.Assimilator.java

private void processKdmXmlFile(final List<File> kdmFiles)
        throws FileNotFoundException, IOException, RepositoryException, ToifException {
    if (debug) {
        LOG.debug("processing kdm file...");
        //System.err.println("processing kdm file...");
    }

    PipedInputStream in = new PipedInputStream();
    final PipedOutputStream out = new PipedOutputStream(in);
    final ThreadStatus status = new ThreadStatus();

    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            KdmXmlHandler handler = null;
            try {
                if (kdmFiles.size() > 1) {
                    final String msg = "There should only be one .kdm file.";
                    LOG.error(msg);
                    throw new ToifException(msg);
                } else if (kdmFiles.size() == 1) {
                    File kdmFile = kdmFiles.get(0); // get the head of the list
                    handler = load(kdmFile, out);
                }
                out.flush();
                out.close();

                if (handler == null) {
                    return;
                }
                setNextId(handler.getNextId());
                setSmallestBigNumber(handler.getSmallestBigNumber());
                // increase
            } catch (IOException e) {
                final String msg = "IO exception whilst processing kdm file. "
                        + ". Possibly an existing kdm file is in your input path!";

                LOG.error(msg, e);
                status.exception = new ToifException(msg, e);
            } catch (RepositoryException e) {
                final String msg = "Repository Exception whilst processing kdm file. "
                        + ". Possibly an existing kdm file is in your input path!";

                LOG.error(msg, e);
                status.exception = new ToifException(msg, e);
            } catch (ToifException e) {
                // RJF final String msg =
                // "Processing Exception whilst processing kdm file. "
                // + ". Possibly that input file is invalid XML!";

                // LOG.error(msg, e);
                status.exception = e;
            } finally {
                if (out != null)
                    try {
                        out.close();
                    } catch (IOException e) {
                        // Just leave it alone
                        LOG.error("unable to close stream");
                    }
            }
        }
    });

    // ---------------------------------------------------------
    // Unable to change logic within the short time frame given so
    // adding a means to catch unknown exceptions in thread
    // ----------------------------------------------------------
    Thread.UncaughtExceptionHandler tueh = new Thread.UncaughtExceptionHandler() {

        public void uncaughtException(Thread th, Throwable ex) {
            LOG.error("Uncaught exception: " + ex);
            status.exception = (Exception) ex;
        }
    };

    t.setUncaughtExceptionHandler(tueh);
    t.start();

    streamStatementsToRepo(in);
    try {
        t.join();

        // Check if we encountered an exception during processing and
        // proxy-throw if we have one
        if (status.exception != null) {
            // Leave alone if already a ToifException
            if (status.exception instanceof ToifException)
                throw (ToifException) status.exception;
            else
                throw new ToifException(status.exception);

        }
    } catch (InterruptedException e) {
        LOG.error("Interrupted");
        throw new ToifException("Interrupted");
    }
}
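
The ThreadStatus holder plus the uncaught-exception handler amount to a general recipe for surfacing worker failures once the pipe has drained. A stripped-down sketch of the same idea (the method and variable names are illustrative):

import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.util.concurrent.atomic.AtomicReference;

static void runPipedWorker(final PipedOutputStream out, PipedInputStream in) throws Exception {
    final AtomicReference<Exception> failure = new AtomicReference<Exception>();
    Thread worker = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                out.write(new byte[] { 1, 2, 3 }); // stand-in for the real producer
                out.close();
            } catch (Exception e) {
                failure.set(e); // stash the failure for the reading thread
            }
        }
    });
    worker.start();
    while (in.read() != -1) {
        // drain the pipe on the calling thread
    }
    worker.join();
    if (failure.get() != null) {
        throw failure.get(); // proxy-throw, as processKdmXmlFile does with ThreadStatus
    }
}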

From source file:com.kdmanalytics.toif.assimilator.Assimilator.java

/**
 * Process the tkdm files.
 * 
 * @param tkdmFiles
 *          the list of tkdm files to process.
 * @return the repository merger used to merge the tkdm files
 * @throws IOException
 * @throws ToifException
 */
private RepositoryMerger processTkdmFiles(final List<File> tkdmFiles) throws IOException, ToifException {
    final PipedInputStream in = new PipedInputStream();
    final PipedOutputStream out = new PipedOutputStream(in);

    String assemblyName = "Assembly";
    int position = outputLocation.getName().lastIndexOf(".");
    if (position != -1) {
        assemblyName = outputLocation.getName().substring(0, position);
    }

    final RepositoryMerger kdmMerger = getTkdmMerger(new PrintWriter(out), assemblyName);
    new Thread(new Runnable() {

        @Override
        public void run() {
            mergeTkdm(kdmMerger, tkdmFiles);
            kdmMerger.close();
            try {
                out.close();
            } catch (IOException e) {
                LOG.error("", e);
                //e.printStackTrace();
            }
        }
    }).start();

    streamStatementsToRepo(in);
    return kdmMerger;
}
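
Note that, unlike processKdmXmlFile above, this writer thread propagates nothing back to the caller: an unchecked exception thrown by mergeTkdm would escape the Runnable and be reported only by the default handler, while streamStatementsToRepo would eventually fail with an IOException once the writing thread dies with the pipe still open.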

From source file:com.kdmanalytics.toif.assimilator.Assimilator.java

/**
 * Process the toif files.
 * 
 * @param toifFiles
 *          list of toif files to process.
 * @param smallestBigNumber2
 *          the smallest number from the end of the long scale, used for the bnodes at the end of
 *          the repository
 * @param blacklistPath
 *          string that is the name of the directory of the project root.
 * @throws IOException
 * @throws ToifException
 */
private void processToifFiles(final List<File> toifFiles, Long id, Long smallestBigNumber2,
        String blacklistPath) throws IOException, ToifException {
    PipedInputStream toifIn = new PipedInputStream();
    final PipedOutputStream toifOut = new PipedOutputStream(toifIn);

    // final ToifMerger toifMerger = getToifMerger(new PrintWriter(toifOut),
    // id, smallestBigNumber2, blacklistPath);

    PrintWriter w = new PrintWriter(toifOut);
    final ToifMerger toifMerger = getToifMerger(w, id, smallestBigNumber2, blacklistPath);
    new Thread(new Runnable() {

        @Override
        public void run() {
            Long offset = mergeToif(toifMerger, toifFiles);

            setOffset(offset);
            try {
                toifOut.close();
            } catch (IOException e) {
                LOG.error("", e);
            }
        }

    }).start();

    streamStatementsToRepo(toifIn);
}

From source file:com.zimbra.cs.mime.Mime.java

/** Returns an {@code InputStream} to the content of a {@code MimeMessage}
 *  by starting a thread that serves up its content to a {@code
 *  PipedOutputStream}.  This workaround is necessary because JavaMail does
 *  not provide {@code InputStream} access to the content. */
public static InputStream getInputStream(MimeMessage mm) throws IOException {
    //        if (isZimbraJavaMailShim(mm)) {
    //            return ((ZMimeMessage) mm).getMessageStream();
    //        }

    // Nasty hack because JavaMail doesn't provide an InputStream accessor
    // to the entire RFC 822 content of a MimeMessage.  Start a thread that
    // serves up the content of the MimeMessage via PipedOutputStream.
    PipedInputStream in = new PipedInputStream();
    PipedOutputStream out = new PipedOutputStream(in);
    Thread thread = new Thread(new MimeMessageOutputThread(mm, out));
    thread.setName("MimeMessageThread");
    thread.start();
    return in;
}
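
A hypothetical consumer (the rawContent helper is an assumption, not part of the Zimbra source). The caller should always drain or close the returned stream: an abandoned pipe leaves the writer thread blocked once the pipe buffer fills.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.mail.internet.MimeMessage;

static byte[] rawContent(MimeMessage mm) throws IOException {
    InputStream in = Mime.getInputStream(mm);
    try {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        byte[] chunk = new byte[8192];
        int n;
        while ((n = in.read(chunk)) != -1) {
            buf.write(chunk, 0, n);
        }
        return buf.toByteArray(); // the full RFC 822 content
    } finally {
        in.close();
    }
}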