Example usage for java.util.zip GZIPOutputStream GZIPOutputStream

Introduction

On this page you can find example usage for java.util.zip GZIPOutputStream GZIPOutputStream.

Prototype

public GZIPOutputStream(OutputStream out) throws IOException 

Document

Creates a new output stream with a default buffer size.
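
For orientation before the repository examples below, here is a minimal, self-contained sketch of the constructor in use; the file name and class name are illustrative only, not taken from any of the projects cited here.

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.GZIPOutputStream;

public class GzipExample {
    public static void main(String[] args) throws IOException {
        // GZIPOutputStream wraps any OutputStream; a file is assumed here purely for illustration.
        try (OutputStream out = new GZIPOutputStream(new FileOutputStream("example.gz"))) {
            out.write("hello gzip".getBytes("UTF-8"));
        } // close() finishes the gzip stream and writes the trailer
    }
}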

Usage

From source file:com.zimbra.common.util.ByteUtil.java

/**
 * compress the supplied data using GZIPOutputStream
 * and return the compressed data.
 * @param data data to compress
 * @return compressed data
 */
public static byte[] compress(byte[] data) throws IOException {
    ByteArrayOutputStream baos = null;
    GZIPOutputStream gos = null;
    try {
        baos = new ByteArrayOutputStream(data.length); // data.length is an upper-bound initial size; compressed output is usually smaller
        gos = new GZIPOutputStream(baos);
        gos.write(data);
        gos.finish();
        return baos.toByteArray();
    } finally {
        if (gos != null) {
            gos.close(); // closing the GZIP stream also closes the underlying ByteArrayOutputStream
        } else if (baos != null) {
            baos.close();
        }
    }
}
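
The class above only shows the compression side; a matching decompression helper is not part of the excerpt. A minimal sketch using GZIPInputStream (the method name decompress is an assumption, not taken from the source file):

public static byte[] decompress(byte[] data) throws IOException {
    // requires java.io.ByteArrayInputStream, java.io.ByteArrayOutputStream, java.util.zip.GZIPInputStream
    try (GZIPInputStream gis = new GZIPInputStream(new ByteArrayInputStream(data));
            ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
        byte[] buffer = new byte[8192];
        int read;
        while ((read = gis.read(buffer)) != -1) {
            baos.write(buffer, 0, read);
        }
        return baos.toByteArray();
    }
}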

From source file:com.act.lcms.db.model.MS1ScanForWellAndMassCharge.java

/**
 * Serialize an object to an array of serialized, gzip'd bytes.
 *
 * Note that this returns a byte array (a) to be symmetrical with deserialize, and (b) because we anticipate
 * manifesting the entire byte array at some point, so there's no advantage to streaming the results.  If that changes
 * and performance suffers from allocating the entire byte array, we can use byte streams instead (and we'll probably
 * have bigger performance problems to deal with anyway).
 *
 * @param object The object to serialize
 * @param <T> The type of the object (unbound to allow serialization of Maps, which sadly don't explicitly implement
 *            Serializable).
 * @return A byte array representing a compressed object stream for the specified object.
 * @throws IOException
 */
private static <T> byte[] serialize(T object) throws IOException {
    ByteArrayOutputStream postGzipOutputStream = new ByteArrayOutputStream();

    try (ObjectOutputStream out = new ObjectOutputStream(new GZIPOutputStream(postGzipOutputStream))) {
        out.writeObject(object);
    }

    return postGzipOutputStream.toByteArray();
}
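
The deserialize counterpart mentioned in the comment is not shown in this excerpt; a hedged sketch (the method name and the unchecked cast are assumptions) might look like:

@SuppressWarnings("unchecked")
private static <T> T deserialize(byte[] bytes) throws IOException, ClassNotFoundException {
    // reverse of serialize: un-gzip the bytes, then read the object stream
    try (ObjectInputStream in = new ObjectInputStream(
            new GZIPInputStream(new ByteArrayInputStream(bytes)))) {
        return (T) in.readObject();
    }
}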

From source file:com.ebay.erl.mobius.core.collection.BigTupleList.java

/**
 * Flush {@link Tuple}s in {@link #buffer_in_memory} into
 * disk, and new local file will be created by {@link #newLocalFile()}
 * and store the {@link File} reference in {@link #buffer_on_disk} for
 * future reference.
 */
private void flushToDisk() {
    this.flushing = true;
    File localFile;

    if (this.buffer_in_memory.size() == 0) {
        // no tuple in memory
        return;
    }
    long start = System.currentTimeMillis();
    long availableMemory = this.availableMemory();

    String message = Thread.currentThread().toString() + " BID[" + this._ID + "] "
            + "writing in-memory tuples (" + getNumberFormat().format(this.buffer_in_memory.size())
            + " entries) into disk, " + "available memory:" + availableMemory / _MB + "MB.";

    LOGGER.info(message);
    if (this.reporter != null) {
        this.reporter.setStatus(message);
        this.reporter.progress();
    }

    try {
        // check if we still have enough local space to prevent
        // a disk-full exception.
        long freeDiskSpace = this.workOutput.getFreeSpace() / _MB;
        if (freeDiskSpace < 300) {
            // less than 300MB free space left, throw
            // exceptions
            throw new IOException("Not enough space left (" + freeDiskSpace + "MB remaining) on "
                    + this.workOutput.getAbsolutePath() + ".");
        }

        localFile = this.newLocalFile();
        DataOutputStream out = new DataOutputStream(
                new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(localFile))));

        // write the tuple schema in the header
        String[] tupleSchema = this.buffer_in_memory.get(0).getSchema();
        out.writeInt(tupleSchema.length);
        if (tupleSchema.length == 0)
            throw new IllegalArgumentException("Tuple with empty schema!");
        for (String aColumn : tupleSchema) {
            out.writeUTF(aColumn);
        }

        // write the number of tuples in this file
        out.writeLong(this.buffer_in_memory.size());

        if (this.comparator != null) {
            // sort the Tuples in memory first
            Collections.sort(this.buffer_in_memory, this.comparator);
        }

        // write all the tuples in the memory buffer
        long counts = 0L;
        for (Tuple aTuple : this.buffer_in_memory) {
            aTuple.write(out);
            counts++;
            if (counts % 5000 == 0 && this.reporter != null)// report every 5000 IO
                this.reporter.progress();
        }
        out.flush();
        out.close();

        // clear memory buffer
        this.buffer_in_memory.clear();

        long end = System.currentTimeMillis();

        LOGGER.info(Thread.currentThread().toString() + " BID[" + this._ID + "] " + "Write has completed, cost "
                + ((end - start) / 1000) + " seconds, " + "available memory:" + this.availableMemory() / _MB
                + "MB, " + "wrote to:" + localFile.getAbsolutePath() + "(size:"
                + localFile.getTotalSpace() / _MB + "MB) , " + "in memory tuples numbers:"
                + this.buffer_in_memory.size());

        this.flushing = false;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
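
Only the write path is shown above. A reader would reverse the same header layout (column count, column names, tuple count); a rough sketch follows, with the per-tuple read call omitted because that part of the Tuple API is not visible in this excerpt:

DataInputStream in = new DataInputStream(
        new GZIPInputStream(new BufferedInputStream(new FileInputStream(localFile))));
try {
    int columnCount = in.readInt();          // schema length written first
    String[] schema = new String[columnCount];
    for (int i = 0; i < columnCount; i++) {
        schema[i] = in.readUTF();            // column names
    }
    long tupleCount = in.readLong();         // number of tuples in this file
    // each Tuple would then be read back with the library's counterpart to Tuple#write(DataOutput)
} finally {
    in.close();
}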

From source file:gov.nih.nci.ncicb.tcga.dcc.common.util.FileUtil.java

public static void createCompressedFiles(final List<String> fileNamesToBeCompressed,
        final String compressedFileName) throws IOException {

    final File compressedFile = new File(compressedFileName);
    for (final String fileNameToBeCompressed : fileNamesToBeCompressed) {
        final File fileToBeCompressed = new File(fileNameToBeCompressed);

        if (!fileToBeCompressed.exists()) {
            throw new IOException("Cache File does not exist: " + fileToBeCompressed.getPath());
        }
    }
    final FileOutputStream outputFileStream = new FileOutputStream(compressedFile);
    final TarOutputStream tarStream = new TarOutputStream(new GZIPOutputStream(outputFileStream));
    try {
        for (final String fileNameToBeCompressed : fileNamesToBeCompressed) {
            final File fileToBeCompressed = new File(fileNameToBeCompressed);
            final String name = fileToBeCompressed.getName();
            final TarEntry tarAdd = new TarEntry(fileToBeCompressed);

            tarStream.setLongFileMode(TarOutputStream.LONGFILE_GNU);
            tarAdd.setModTime(fileToBeCompressed.lastModified());
            tarAdd.setName(name);
            tarStream.putNextEntry(tarAdd);

            FileInputStream inputStream = null;
            byte[] buffer = new byte[1024 * 64];
            try {

                inputStream = new FileInputStream(fileToBeCompressed);
                int nRead = inputStream.read(buffer, 0, buffer.length);
                while (nRead >= 0) {
                    tarStream.write(buffer, 0, nRead);
                    nRead = inputStream.read(buffer, 0, buffer.length);
                }

                tarStream.closeEntry();
            } finally {
                buffer = null;
                try {
                    if (inputStream != null) {
                        inputStream.close();
                    }
                } catch (IOException ie) {
                    logger.error("Error closing I/O streams " + ie.toString());
                }
            }

        }
    } finally {
        if (tarStream != null) {
            tarStream.close();
        }

    }
}
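
A hypothetical call site for this helper (the paths are placeholders; java.util.Arrays and java.util.List are assumed to be imported):

List<String> inputs = Arrays.asList("/tmp/cache/part-0001.txt", "/tmp/cache/part-0002.txt");
FileUtil.createCompressedFiles(inputs, "/tmp/cache/archive.tar.gz");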

From source file:com.appassit.http.AndroidHttpClient.java

/**
 * Compress data to send to server. Creates an HttpEntity holding the gzipped data. The data will not be compressed if it is too short.
 *
 * @param data
 *            The bytes to compress
 * @return Entity holding the data
 */
public static AbstractHttpEntity getCompressedEntity(byte data[]) throws IOException {
    AbstractHttpEntity entity;
    if (data.length < getMinGzipSize()) {
        entity = new ByteArrayEntity(data);
    } else {
        ByteArrayOutputStream arr = new ByteArrayOutputStream();
        OutputStream zipper = new GZIPOutputStream(arr);
        zipper.write(data);
        zipper.close();
        entity = new ByteArrayEntity(arr.toByteArray());
        entity.setContentEncoding("gzip");
    }
    return entity;
}
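
A sketch of how such an entity is typically attached to an Apache HttpClient request; the URL and payload are placeholders, not taken from the source project:

byte[] payload = "{\"key\":\"value\"}".getBytes("UTF-8");
AbstractHttpEntity entity = AndroidHttpClient.getCompressedEntity(payload);
HttpPost post = new HttpPost("https://example.com/api");
post.setEntity(entity);
// when the payload was large enough to be gzipped, the entity already carries Content-Encoding: gzip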

From source file:com.ning.metrics.collector.processing.db.DatabaseCounterStorage.java

/**
 * serialize the given rolled-up counter data's distribution to a byte
 * array for storage in a blob
 * @param counter
 * @return
 * @throws java.io.IOException technically, but unlikely because all ops are
 *          in memory
 */
public static byte[] serializeDistribution(RolledUpCounterData counter) throws IOException {

    ByteArrayOutputStream result = new ByteArrayOutputStream();
    GZIPOutputStream zipStream = new GZIPOutputStream(result);
    PrintStream printer = new PrintStream(zipStream, true, "UTF-8");

    int index = 0;

    // iterate through all the entries in the distribution and generate a
    // serialization of the form:
    //
    // uniqueId1|count1\n
    // uniqueId2|count2\n
    // ...
    //
    // and then gzip the result into a byte array
    for (Map.Entry<String, Integer> entry : counter.getDistribution().entrySet()) {
        String id = entry.getKey();
        int value = entry.getValue() == null ? 0 : entry.getValue();

        // Don't write unique ids that have a zero count
        if (value == 0) {
            continue;
        }

        if (index++ > 0) {
            printer.println();
        }

        printer.print(id);
        printer.print('|');
        printer.print(Integer.toString(value));
    }

    zipStream.finish();
    printer.close();

    return result.toByteArray();
}
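
Reading the blob back reverses the same line format; a hedged sketch (the method name deserializeDistribution is an assumption, not part of the class as shown):

public static Map<String, Integer> deserializeDistribution(byte[] blob) throws IOException {
    Map<String, Integer> distribution = new HashMap<String, Integer>();
    try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(new GZIPInputStream(new ByteArrayInputStream(blob)), "UTF-8"))) {
        String line;
        while ((line = reader.readLine()) != null) {
            int sep = line.indexOf('|');     // each line is uniqueId|count
            if (sep < 0) {
                continue;
            }
            distribution.put(line.substring(0, sep), Integer.parseInt(line.substring(sep + 1)));
        }
    }
    return distribution;
}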

From source file:com.hypersocket.netty.HttpRequestDispatcherHandler.java

private InputStream processContent(HttpRequestServletWrapper servletRequest,
        HttpResponseServletWrapper servletResponse, String acceptEncodings) {

    InputStream writer = (InputStream) servletRequest.getAttribute(CONTENT_INPUTSTREAM);

    if (writer != null) {
        if (log.isDebugEnabled()) {
            log.debug("Response for " + servletRequest.getRequestURI() + " will be chunked");
        }
        servletResponse.setChunked(true);
        servletResponse.removeHeader(HttpHeaders.CONTENT_LENGTH);
        servletResponse.setHeader(HttpHeaders.TRANSFER_ENCODING, "chunked");
        return writer;
    }

    if (servletResponse.getContent().readableBytes() > 0) {

        ChannelBuffer buffer = servletResponse.getContent();
        boolean doGzip = false;

        if (servletResponse.getNettyResponse().getHeader("Content-Encoding") == null) {
            if (acceptEncodings != null) {
                doGzip = acceptEncodings.indexOf("gzip") > -1;
            }

            if (doGzip) {
                try {
                    ByteArrayOutputStream gzipped = new ByteArrayOutputStream();
                    GZIPOutputStream gzip = new GZIPOutputStream(gzipped);
                    gzip.write(buffer.array(), 0, buffer.readableBytes());
                    gzip.finish();
                    buffer = ChannelBuffers.wrappedBuffer(gzipped.toByteArray());
                    servletResponse.setHeader("Content-Encoding", "gzip");
                } catch (IOException e) {
                    log.error("Failed to gzip response", e);
                }
            }
        }

        servletResponse.getNettyResponse().setContent(buffer);
        servletResponse.setHeader("Content-Length", String.valueOf(buffer.readableBytes()));
    } else {
        servletResponse.setHeader("Content-Length", "0");
    }

    return null;
}