List of usage examples for the java.util.zip.GZIPOutputStream constructor
public GZIPOutputStream(OutputStream out) throws IOException
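Before the real-world examples below, a minimal sketch of the constructor in isolation may help (class and variable names here are illustrative, not taken from any example): wrap any OutputStream, write plain bytes, and close to finish the deflate stream and write the gzip trailer.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPOutputStream;

public class GzipConstructorExample {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        // Closing the GZIPOutputStream writes the gzip trailer;
        // try-with-resources guarantees this even if write() throws.
        try (GZIPOutputStream gzip = new GZIPOutputStream(buffer)) {
            gzip.write("hello".getBytes(StandardCharsets.UTF_8));
        }
        byte[] compressed = buffer.toByteArray(); // a complete gzip stream
        System.out.println(compressed.length + " bytes");
    }
}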
From source file:com.wbtech.dao.NetworkUitlity.java
/**
 * Compress a string into gzip bytes.
 *
 * @param str
 * @return
 */
public static byte[] compressToByte(String str) {
    if (str == null || str.length() == 0) {
        return null;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    GZIPOutputStream gzip;
    try {
        gzip = new GZIPOutputStream(out);
        gzip.write(str.getBytes("utf-8"));
        gzip.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return out.toByteArray();
}
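Note that this helper swallows the IOException, so on failure the caller can receive an incomplete gzip stream rather than an error. The matching decompressor is not shown in this listing; a minimal sketch of one, assuming the same "utf-8" encoding (the method name and buffer size are illustrative):

public static String decompressToString(byte[] compressed) throws IOException {
    if (compressed == null || compressed.length == 0) {
        return null;
    }
    // GZIPInputStream reverses what compressToByte produced
    try (java.util.zip.GZIPInputStream gzip =
                 new java.util.zip.GZIPInputStream(new java.io.ByteArrayInputStream(compressed));
         java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream()) {
        byte[] buf = new byte[4096];
        int n;
        while ((n = gzip.read(buf)) > 0) {
            out.write(buf, 0, n);
        }
        return out.toString("utf-8");
    }
}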
From source file:com.gargoylesoftware.htmlunit.util.DebuggingWebConnectionTest.java
/**
 * Ensures that Content-Encoding headers are removed when JavaScript is uncompressed
 * (was causing java.io.IOException: Not in GZIP format as of HtmlUnit-2.10).
 * @throws Exception if the test fails
 */
@Test
public void gzip() throws Exception {
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final GZIPOutputStream gzipOutputStream = new GZIPOutputStream(baos);
    IOUtils.write("alert(1)", gzipOutputStream, "UTF-8");
    gzipOutputStream.close();

    final MockWebConnection mockConnection = new MockWebConnection();
    final List<NameValuePair> responseHeaders = Arrays.asList(new NameValuePair("Content-Encoding", "gzip"));
    mockConnection.setResponse(getDefaultUrl(), baos.toByteArray(), 200, "OK", "application/javascript",
            responseHeaders);

    final String dirName = "test-" + getClass().getSimpleName();
    try (final DebuggingWebConnection dwc = new DebuggingWebConnection(mockConnection, dirName)) {
        final WebRequest request = new WebRequest(getDefaultUrl());
        final WebResponse response = dwc.getResponse(request); // was throwing here
        assertNull(response.getResponseHeaderValue("Content-Encoding"));
        FileUtils.deleteDirectory(dwc.getReportFolder());
    }
}
From source file:de.tudarmstadt.ukp.dkpro.tc.mallet.util.MalletUtils.java
public static void writeNewLineToFile(File outputFile) throws IOException {
    BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(outputFile, true)), "UTF-8"));
    bw.write("\n");
    bw.flush();
    bw.close();
}
From source file:org.wso2.carbon.automation.extensions.servers.httpserver.SimpleHttpClient.java
/**
 * Send an HTTP POST request to the specified URL.
 *
 * @param url         Target endpoint URL
 * @param headers     Any HTTP headers that should be added to the request
 * @param payload     Content payload that should be sent
 * @param contentType Content-type of the request
 * @return Returned HTTP response
 * @throws IOException If an error occurs while making the invocation
 */
public HttpResponse doPost(String url, final Map<String, String> headers, final String payload,
        String contentType) throws IOException {
    HttpUriRequest request = new HttpPost(url);
    setHeaders(headers, request);
    HttpEntityEnclosingRequest entityEncReq = (HttpEntityEnclosingRequest) request;
    final boolean zip = headers != null && "gzip".equals(headers.get(HttpHeaders.CONTENT_ENCODING));

    EntityTemplate ent = new EntityTemplate(new ContentProducer() {
        public void writeTo(OutputStream outputStream) throws IOException {
            OutputStream out = outputStream;
            if (zip) {
                out = new GZIPOutputStream(outputStream);
            }
            out.write(payload.getBytes(Charset.defaultCharset()));
            out.flush();
            out.close();
        }
    });

    ent.setContentType(contentType);
    if (zip) {
        ent.setContentEncoding("gzip");
    }
    entityEncReq.setEntity(ent);
    return client.execute(request);
}
From source file:com.cloudera.sqoop.io.SplittingOutputStream.java
/** Initialize the OutputStream to the next file to write to. */
private void openNextFile() throws IOException {
    FileSystem fs = FileSystem.get(conf);
    StringBuffer sb = new StringBuffer();
    Formatter fmt = new Formatter(sb);
    fmt.format("%05d", this.fileNum++);
    String filename = filePrefix + fmt.toString();
    if (this.doGzip) {
        filename = filename + ".gz";
    }
    Path destFile = new Path(destDir, filename);
    LOG.debug("Opening next output file: " + destFile);
    if (fs.exists(destFile)) {
        Path canonicalDest = destFile.makeQualified(fs);
        throw new IOException("Destination file " + canonicalDest + " already exists");
    }

    OutputStream fsOut = fs.create(destFile);

    // Count how many actual bytes hit HDFS.
    this.countingFilterStream = new CountingOutputStream(fsOut);

    if (this.doGzip) {
        // Wrap that in a Gzip stream.
        this.writeStream = new GZIPOutputStream(this.countingFilterStream);
    } else {
        // Write to the counting stream directly.
        this.writeStream = this.countingFilterStream;
    }
}
From source file:de.iai.ilcd.model.common.XmlFile.java
/**
 * Compress the content of the XML file prior to persist/merge events in order to save database space and be
 * compatible with MySQL server default configurations as long as possible (1MB max packet size).
 *
 * @throws Exception
 *             if anything goes wrong; just in-memory IO operations, should not happen
 * @see #decompressContent()
 */
@PrePersist
protected void compressContent() throws Exception {
    if (this.content == null) {
        this.compressedContent = null;
    } else {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        GZIPOutputStream gzipOut = new GZIPOutputStream(out);
        gzipOut.write(this.content.getBytes("UTF-8"));
        gzipOut.flush();
        gzipOut.close();
        this.compressedContent = out.toByteArray();
    }
}
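The @see tag references a decompressContent() counterpart that is not included in this listing. A sketch of what such a hook could look like, assuming a JPA @PostLoad callback and the same field names; this is a reconstruction, not the original source:

@PostLoad
protected void decompressContent() throws Exception {
    if (this.compressedContent == null) {
        this.content = null;
    } else {
        // Reverse compressContent(): gunzip the stored bytes back into the string field
        java.util.zip.GZIPInputStream gzipIn = new java.util.zip.GZIPInputStream(
                new java.io.ByteArrayInputStream(this.compressedContent));
        java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
        byte[] buf = new byte[4096];
        int n;
        while ((n = gzipIn.read(buf)) > 0) {
            out.write(buf, 0, n);
        }
        gzipIn.close();
        this.content = out.toString("UTF-8");
    }
}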
From source file:org.wso2.esb.integration.common.utils.clients.SimpleHttpClient.java
/**
 * Send an HTTP POST request to the specified URL.
 *
 * @param url         Target endpoint URL
 * @param headers     Any HTTP headers that should be added to the request
 * @param payload     Content payload that should be sent
 * @param contentType Content-type of the request
 * @return Returned HTTP response
 * @throws IOException If an error occurs while making the invocation
 */
public HttpResponse doPost(String url, final Map<String, String> headers, final String payload,
        String contentType) throws IOException {
    HttpUriRequest request = new HttpPost(url);
    setHeaders(headers, request);
    HttpEntityEnclosingRequest entityEncReq = (HttpEntityEnclosingRequest) request;
    final boolean zip = headers != null && "gzip".equals(headers.get(HttpHeaders.CONTENT_ENCODING));

    EntityTemplate ent = new EntityTemplate(new ContentProducer() {
        public void writeTo(OutputStream outputStream) throws IOException {
            OutputStream out = outputStream;
            if (zip) {
                out = new GZIPOutputStream(outputStream);
            }
            out.write(payload.getBytes());
            out.flush();
            out.close();
        }
    });

    ent.setContentType(contentType);
    if (zip) {
        ent.setContentEncoding("gzip");
    }
    entityEncReq.setEntity(ent);
    return client.execute(request);
}
From source file:eu.delving.sip.base.Harvestor.java
@Override
public void run() {
    try {
        if (context.harvestPrefix() == null || context.harvestPrefix().trim().isEmpty()) {
            throw new IllegalArgumentException("Harvest prefix missing");
        }
        new URL(context.harvestUrl()); // throws MalformedURLException if it is malformed
        OutputStream outputStream = new GZIPOutputStream(new FileOutputStream(dataSet.importedOutput()));
        XMLEventWriter out = outputFactory.createXMLEventWriter(new OutputStreamWriter(outputStream, "UTF-8"));
        out.add(eventFactory.createStartDocument());
        out.add(eventFactory.createCharacters("\n"));
        progressListener.setProgress(recordCount);
        HttpEntity fetchedRecords = fetchFirstEntity();
        String resumptionToken = saveRecords(fetchedRecords, out);
        while (isValidResumptionToken(resumptionToken) && recordCount > 0) {
            EntityUtils.consume(fetchedRecords);
            progressListener.setProgress(recordCount);
            fetchedRecords = fetchNextEntity(resumptionToken);
            resumptionToken = saveRecords(fetchedRecords, out);
            if (!isValidResumptionToken(resumptionToken) && recordCount > 0) {
                EntityUtils.consume(fetchedRecords);
            }
        }
        out.add(eventFactory.createEndElement("", "", ENVELOPE_TAG));
        out.add(eventFactory.createCharacters("\n"));
        out.add(eventFactory.createEndDocument());
        out.flush();
        outputStream.close();
    } catch (CancelException e) {
        progressListener.getFeedback().alert("Cancelled harvest of " + context.harvestUrl(), e);
        recordCount = 0;
    } catch (Exception e) {
        progressListener.getFeedback().alert(String.format("Unable to complete harvest of %s because of: %s",
                context.harvestUrl(), e.getMessage()), e);
        recordCount = 0;
    } finally {
        if (recordCount > 0) {
            progressListener.getFeedback().alert(String.format("Harvest of %s successfully fetched %d records",
                    context.harvestUrl(), recordCount));
        } else {
            FileUtils.deleteQuietly(dataSet.importedOutput());
        }
    }
}
From source file:gov.nih.nci.caarray.domain.MultiPartBlob.java
/**
 * Method that takes an input stream and breaks it up into multiple blobs. Note that this method loads each
 * chunk into a byte[]; while this is not ideal, it will be done by the MySQL driver anyway, so we are not
 * adding a new inefficiency.
 *
 * @param data the input stream to store.
 * @param compress true to compress the data, false to leave it uncompressed
 * @param blobPartSize the maximum size of a single blob
 * @throws IOException on error reading from the stream.
 */
public void writeData(InputStream data, boolean compress, int blobPartSize) throws IOException {
    final ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
    OutputStream writeStream;
    if (compress) {
        writeStream = new GZIPOutputStream(byteStream);
    } else {
        writeStream = byteStream;
    }
    byte[] unwritten = new byte[0];
    final byte[] uncompressed = new byte[blobPartSize];
    int len = 0;
    while ((len = data.read(uncompressed)) > 0) {
        uncompressedSize += len;
        writeStream.write(uncompressed, 0, len);
        if (byteStream.size() + unwritten.length >= blobPartSize) {
            compressedSize += byteStream.size();
            unwritten = writeData(ArrayUtils.addAll(unwritten, byteStream.toByteArray()), blobPartSize, false);
            byteStream.reset();
        }
    }
    IOUtils.closeQuietly(writeStream);
    compressedSize += byteStream.size();
    writeData(ArrayUtils.addAll(unwritten, byteStream.toByteArray()), blobPartSize, true);
}
From source file:be.ibridge.kettle.trans.step.sortrows.SortRows.java
private boolean addBuffer(Row r) {
    if (r != null) {
        data.buffer.add(r); // Save row
    }
    if (data.files.size() == 0 && r == null) {
        // No more records: sort the buffer
        quickSort(data.buffer);
    }
    // Time to write to disk: buffer is full!
    if (data.buffer.size() == meta.getSortSize() // Buffer is full: sort & dump to disk
            || (data.files.size() > 0 && r == null && data.buffer.size() > 0) // No more records: join from disk
    ) {
        // First sort the rows in buffer[]
        quickSort(data.buffer);

        // Then write them to disk...
        DataOutputStream dos;
        GZIPOutputStream gzos;
        int p;
        try {
            FileObject fileObject = KettleVFS.createTempFile(meta.getPrefix(), ".tmp",
                    StringUtil.environmentSubstitute(meta.getDirectory()));
            data.files.add(fileObject); // Remember the files!
            OutputStream outputStream = fileObject.getContent().getOutputStream();
            if (meta.getCompress()) {
                gzos = new GZIPOutputStream(new BufferedOutputStream(outputStream));
                dos = new DataOutputStream(gzos);
            } else {
                dos = new DataOutputStream(outputStream);
                gzos = null;
            }

            // How many records do we have?
            dos.writeInt(data.buffer.size());

            for (p = 0; p < data.buffer.size(); p++) {
                if (p == 0) {
                    // Save the metadata, keep it in memory
                    data.rowMeta.add(new Row((Row) data.buffer.get(p)));
                }
                // Just write the data, nothing else
                ((Row) data.buffer.get(p)).writeData(dos);
            }

            // Close temp-file
            dos.close(); // close data stream
            if (gzos != null) {
                gzos.close(); // close gzip stream
            }
            outputStream.close(); // close file stream
        } catch (Exception e) {
            logError("Error processing temp-file: " + e.toString());
            return false;
        }
        data.buffer.clear();
    }
    return true;
}