List of usage examples for java.util.zip.GZIPOutputStream.close()
public void close() throws IOException
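Before the project examples below, a minimal sketch of the method's core contract: close() writes any remaining compressed data and the GZIP trailer to the underlying stream, then closes that stream. This is a hedged illustration, not taken from any of the projects below; the output file name is a placeholder, and try-with-resources guarantees close() runs even on error:

    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.GZIPOutputStream;

    public class GzipCloseExample {
        public static void main(String[] args) throws IOException {
            byte[] data = "hello, gzip".getBytes(StandardCharsets.UTF_8);
            // try-with-resources calls close(), which flushes the remaining
            // compressed data and the GZIP trailer, then closes the file stream
            try (GZIPOutputStream gzip = new GZIPOutputStream(new FileOutputStream("out.gz"))) {
                gzip.write(data);
            }
        }
    }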
From source file:com.unilever.audit.services2.Sync_Down.java
    /**
     * Retrieves representation of an instance of
     * com.unilever.audit.services2.AuditResource
     *
     * @param id
     * @param dataType
     * @return the gzip-compressed JSON representation as a byte array
     */
    @GET
    @Path("getSyncObject/{id}/{dataType}/{compress}")
    @Produces("application/json")
    public byte[] getSyncObject(@PathParam("id") int id, @PathParam("dataType") String dataType,
            @PathParam("compress") int compress) {
        GZIPOutputStream gzip = null;
        count++;
        ByteArrayOutputStream out = null;
        SyncDownObjects syncDownObjects = getObject(dataType, id);
        try {
            out = new ByteArrayOutputStream();
            gzip = new GZIPOutputStream(out);
            ObjectMapper mapper = new ObjectMapper();
            AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
            AnnotationIntrospector introspector1 = new JacksonAnnotationIntrospector();
            mapper.setAnnotationIntrospectors(introspector, introspector1);
            mapper.setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));
            //String jsonString = mapper.writeValueAsString(syncDownObjects);
            //JSONObject jsonobject = (JSONObject) new JSONParser().parse(jsonString);
            //gzip.write(jsonobject.toString().getBytes("8859_1"));
            //gzip.write(jsonobject.toString().getBytes("UTF-8"));
            gzip.write(mapper.writeValueAsBytes(syncDownObjects));
            gzip.close();
        } catch (IOException ex) {
            ex.printStackTrace();
        }
        //catch (ParseException ex) {
        //    ex.printStackTrace();
        //}
        System.out.println("======================= count : " + count);
        return out.toByteArray();
    }
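The example above relies on an important detail of close(): the array returned by out.toByteArray() is only a complete GZIP stream after gzip.close() has run, because close() flushes the pending deflater output and the trailer into the ByteArrayOutputStream. A minimal sketch of that in-memory pattern in isolation (class and method names are illustrative, not from the project above):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.zip.GZIPOutputStream;

    public final class InMemoryGzip {
        // Compress bytes in memory; the returned array is a full GZIP stream
        public static byte[] compress(byte[] input) throws IOException {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            GZIPOutputStream gzip = new GZIPOutputStream(out);
            gzip.write(input);
            gzip.close(); // must happen before toByteArray(): writes remaining blocks + trailer
            return out.toByteArray();
        }
    }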
From source file:com.funambol.transport.http.server.Sync4jServlet.java
    /**
     * Sets the content of the HTTP response.
     *
     * Compresses the response if the Accept-Encoding is gzip or deflate.
     * Sets the Content-Encoding according to the encoding used.
     * Sets the Content-Length with the length of the compressed response.
     * Sets the Uncompressed-Content-Length with the length of the uncompressed
     * response. The response will be compressed only if the length of the
     * uncompressed response is greater than the given sizeThreshold.
     *
     * @param httpResponse the HttpServletResponse
     * @param requestAcceptEncoding the <code>Accept-Encoding</code> specified
     *                              in the request
     * @param sizeThreshold if the response is smaller than this value, it
     *                      should not be compressed
     * @param resp the SyncResponse object that contains the response message
     * @param requestTime the time at which the request arrived at the servlet
     * @param sessionId the session identifier
     * @throws java.io.IOException if an error occurs
     */
    private void setResponseContent(HttpServletResponse httpResponse, String requestAcceptEncoding,
            String sizeThreshold, SyncResponse resp, long requestTime, String sessionId) throws IOException {

        byte[] responseContent = null;
        OutputStream out = null;

        try {
            out = httpResponse.getOutputStream();
            responseContent = resp.getMessage();
            int uncompressedContentLength = responseContent.length;

            if (supportedEncoding != null && !"".equals(supportedEncoding) && enableCompression) {
                if (log.isTraceEnabled()) {
                    log.trace("Setting Accept-Encoding to " + supportedEncoding);
                }
                httpResponse.setHeader(HEADER_ACCEPT_ENCODING, supportedEncoding);
            }

            String encodingToUse = null;
            if (requestAcceptEncoding != null) {
                if (requestAcceptEncoding.indexOf(COMPRESSION_TYPE_GZIP) != -1
                        && requestAcceptEncoding.indexOf(COMPRESSION_TYPE_DEFLATE) != -1) {
                    encodingToUse = preferredEncoding;
                } else if (requestAcceptEncoding.indexOf(COMPRESSION_TYPE_DEFLATE) != -1) {
                    encodingToUse = COMPRESSION_TYPE_DEFLATE;
                } else if (requestAcceptEncoding.indexOf(COMPRESSION_TYPE_GZIP) != -1) {
                    encodingToUse = COMPRESSION_TYPE_GZIP;
                }
            }

            int threshold = 0;
            try {
                if (sizeThreshold != null && sizeThreshold.length() != 0) {
                    threshold = Integer.parseInt(sizeThreshold);
                }
            } catch (NumberFormatException ex) {
                //
                // Ignoring the specified value
                //
                if (log.isTraceEnabled()) {
                    log.trace("The size threshold specified by the client (" + sizeThreshold
                            + ") is not valid.");
                }
            }

            //
            // If the encodingToUse is null or the uncompressed response length
            // is less than sizeThreshold, the response will not be compressed.
            //
            if (encodingToUse == null || uncompressedContentLength < threshold) {
                if (log.isTraceEnabled()) {
                    if (enableCompression) {
                        if (requestAcceptEncoding == null) {
                            log.trace("The client doesn't support any encoding. "
                                    + "The response is not compressed");
                        } else if (encodingToUse == null) {
                            log.trace("The specified Accept-Encoding (" + requestAcceptEncoding
                                    + ") is not recognized. The response is not compressed");
                        } else if (uncompressedContentLength < threshold) {
                            log.trace("The response is not compressed because smaller than " + threshold);
                        }
                    }
                }
                if (log.isTraceEnabled()) {
                    log.trace("Setting Content-Length to: " + uncompressedContentLength);
                }
                httpResponse.setContentLength(uncompressedContentLength);
                out.write(responseContent);
                out.flush();
                return;
            }

            if (encodingToUse != null) {
                if (log.isTraceEnabled()) {
                    log.trace("Compressing the response using: " + encodingToUse);
                    log.trace("Setting Uncompressed-Content-Length to: " + uncompressedContentLength);
                }
                httpResponse.setHeader(HEADER_UNCOMPRESSED_CONTENT_LENGTH,
                        String.valueOf(uncompressedContentLength));

                if (encodingToUse.equals(COMPRESSION_TYPE_GZIP)) {
                    ByteArrayOutputStream bos = new ByteArrayOutputStream();
                    GZIPOutputStream outTmp = new GZIPOutputStream(bos);
                    outTmp.write(responseContent, 0, uncompressedContentLength);
                    outTmp.flush();
                    outTmp.close();

                    //
                    // Get the compressed data
                    //
                    responseContent = bos.toByteArray();
                    int compressedLength = responseContent.length;

                    if (log.isTraceEnabled()) {
                        log.trace("Setting Content-Length to: " + compressedLength);
                        log.trace("Setting Content-Encoding to: " + COMPRESSION_TYPE_GZIP);
                    }
                    httpResponse.setContentLength(compressedLength);
                    httpResponse.setHeader(HEADER_CONTENT_ENCODING, COMPRESSION_TYPE_GZIP);
                    out.write(responseContent);
                    out.flush();

                } else if (encodingToUse.equals(COMPRESSION_TYPE_DEFLATE)) {
                    //
                    // Create the compressor with the specified level of compression
                    //
                    Deflater compressor = new Deflater();
                    compressor.setLevel(compressionLevel);
                    compressor.setInput(responseContent);
                    compressor.finish();

                    //
                    // Create an expandable byte array to hold the compressed data.
                    // You cannot use an array that's the same size as the original
                    // because there is no guarantee that the compressed data will
                    // be smaller than the uncompressed data.
                    //
                    ByteArrayOutputStream bos = new ByteArrayOutputStream(uncompressedContentLength);

                    //
                    // Compress the response
                    //
                    byte[] buf = new byte[SIZE_INPUT_BUFFER];
                    while (!compressor.finished()) {
                        int count = compressor.deflate(buf);
                        bos.write(buf, 0, count);
                    }

                    //
                    // Get the compressed data
                    //
                    responseContent = bos.toByteArray();
                    int compressedLength = responseContent.length;

                    if (log.isTraceEnabled()) {
                        log.trace("Setting Content-Length to: " + compressedLength);
                        log.trace("Setting Content-Encoding to: " + COMPRESSION_TYPE_DEFLATE);
                    }
                    httpResponse.setContentLength(compressedLength);
                    httpResponse.setHeader(HEADER_CONTENT_ENCODING, COMPRESSION_TYPE_DEFLATE);
                    out.write(responseContent);
                    out.flush();
                }
            }
        } finally {
            if (out != null) {
                out.close();
            }
            if (logMessages) {
                logResponse(responseContent, requestTime, sessionId);
            }
        }
    }
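Note that the gzip branch above compresses into an intermediate ByteArrayOutputStream rather than wrapping httpResponse.getOutputStream() directly; that indirection is what lets it set Content-Length before writing. When the length header is not needed, the response stream can be wrapped directly, in which case close() on the GZIPOutputStream finishes the compressed stream and closes the servlet stream beneath it. A hedged sketch of that alternative (the helper class is illustrative, not part of the project above):

    import java.io.IOException;
    import java.util.zip.GZIPOutputStream;
    import javax.servlet.http.HttpServletResponse;

    class GzipResponseHelper {
        // Sketch: stream gzip straight to the client; no Content-Length can be set
        static void writeGzipped(HttpServletResponse httpResponse, byte[] responseContent) throws IOException {
            httpResponse.setHeader("Content-Encoding", "gzip");
            GZIPOutputStream gzOut = new GZIPOutputStream(httpResponse.getOutputStream());
            gzOut.write(responseContent);
            gzOut.close(); // finishes the GZIP stream and closes the servlet output stream
        }
    }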
From source file:org.dspace.EDMExport.controller.homeController.java
    /**
     * Controller for the /viewXml.htm URL with GET method and no parameters.
     * Collects the form data with the EDM element configuration in order to display the XML of the selected items.
     * If there are errors, it redirects to the list of selected items; otherwise it displays the XML with the data in EDM.
     * If the XML to display exceeds 2MB and is not compressed by the servlet, the content is compressed here and sent as well.
     *
     * @param edmExportBOFormEDMData object that collects the data passed from the form {@link EDMExportBOFormEDMData}
     * @param result object used to bind and validate the request {@link BindingResult}
     * @param model Spring Model object with the request {@link Model}
     * @param request HTTP request object used to read the flash-scoped data (like the session, but lasting one request)
     * @return string with the view to render and display
     */
    @RequestMapping(value = "/viewXml.htm", method = RequestMethod.GET)
    public String getViewXML(@ModelAttribute(value = "FormEDMData") EDMExportBOFormEDMData edmExportBOFormEDMData,
            BindingResult result, Model model, HttpServletRequest request) {
        logger.debug("homeController.getViewXML");
        Map<String, ?> map = RequestContextUtils.getInputFlashMap(request);
        if (map == null) {
            logger.debug("No FlashMap");
            return "redirect:selectedItems.htm";
        }
        if (result.hasErrors()) {
            logErrorValid(result);
            return "redirect:selectedItems.htm";
        } else {
            edmExportServiceXML.setEdmExportServiceListItems(edmExportServiceListItems);
            String edmXML = edmExportServiceXML.showEDMXML(edmExportBOFormEDMData, servletContext.getRealPath(""));
            logger.debug(edmXML);
            String edmXMLEncoded = "";
            String encoding = request.getHeader("Content-Encoding");
            //while (edmXML.length() <= 1500000) edmXML += edmXML;
            if (edmXML.length() > 1500000 && (encoding == null || encoding.isEmpty())) {
                ByteArrayOutputStream output = null;
                GZIPOutputStream gzOut = null;
                try {
                    output = new ByteArrayOutputStream();
                    gzOut = new GZIPOutputStream(output);
                    gzOut.write(edmXML.getBytes("UTF-8"));
                    gzOut.finish();
                    byte[] encoded = Base64.encodeBase64(output.toByteArray());
                    edmXMLEncoded = new String(encoded, "UTF-8");
                } catch (IOException e) {
                    logger.debug("IOException", e);
                } finally {
                    try {
                        if (output != null)
                            output.close();
                        if (gzOut != null)
                            gzOut.close();
                    } catch (IOException e) {
                        logger.debug("IOException", e);
                    }
                }
            }
            logger.debug(edmXMLEncoded);
            model.addAttribute("formatXML", edmExportBOFormEDMData.getXmlFormat());
            model.addAttribute("edmXML", edmXML);
            model.addAttribute("edmXMLEncoded", edmXMLEncoded);
            model.addAttribute("listElementsFilled", edmExportServiceXML.getListElementsFilled());
            return returnView("viewXml", model);
        }
    }
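The example above separates gzOut.finish() from gzOut.close(): finish() completes the compressed output (so output.toByteArray() already contains the GZIP trailer) without closing the underlying stream, while close() in the finally block then releases both streams. A small sketch of the distinction (names are illustrative):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.GZIPOutputStream;

    class FinishVsClose {
        static byte[] gzipWithFinish(String text) throws IOException {
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            GZIPOutputStream gzOut = new GZIPOutputStream(output);
            try {
                gzOut.write(text.getBytes(StandardCharsets.UTF_8));
                gzOut.finish(); // compressed data is complete; 'output' stays open
                return output.toByteArray();
            } finally {
                gzOut.close(); // also closes 'output'; redundant after finish() but harmless
            }
        }
    }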
From source file:be.ibridge.kettle.trans.step.sortrows.SortRows.java
    private boolean addBuffer(Row r) {
        if (r != null) {
            data.buffer.add(r); // Save row
        }
        if (data.files.size() == 0 && r == null) // No more records: sort buffer
        {
            quickSort(data.buffer);
        }
        // Time to write to disk: buffer is full!
        if (data.buffer.size() == meta.getSortSize() // Buffer is full: sort & dump to disk
                || (data.files.size() > 0 && r == null && data.buffer.size() > 0) // No more records: join from disk
        ) {
            // First sort the rows in buffer[]
            quickSort(data.buffer);

            // Then write them to disk...
            DataOutputStream dos;
            GZIPOutputStream gzos;
            int p;

            try {
                FileObject fileObject = KettleVFS.createTempFile(meta.getPrefix(), ".tmp",
                        StringUtil.environmentSubstitute(meta.getDirectory()));

                data.files.add(fileObject); // Remember the files!
                OutputStream outputStream = fileObject.getContent().getOutputStream();
                if (meta.getCompress()) {
                    gzos = new GZIPOutputStream(new BufferedOutputStream(outputStream));
                    dos = new DataOutputStream(gzos);
                } else {
                    dos = new DataOutputStream(outputStream);
                    gzos = null;
                }

                // How many records do we have?
                dos.writeInt(data.buffer.size());

                for (p = 0; p < data.buffer.size(); p++) {
                    if (p == 0) {
                        // Save the metadata, keep it in memory
                        data.rowMeta.add(new Row(((Row) data.buffer.get(p))));
                    }
                    // Just write the data, nothing else
                    ((Row) data.buffer.get(p)).writeData(dos);
                }

                // Close the temp-file
                dos.close(); // close data stream
                if (gzos != null) {
                    gzos.close(); // close gzip stream
                }
                outputStream.close(); // close file stream
            } catch (Exception e) {
                logError("Error processing temp-file: " + e.toString());
                return false;
            }

            data.buffer.clear();
        }
        return true;
    }
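The Kettle example closes each stream in the chain explicitly (dos, then gzos, then the file stream). Closing only the outermost stream is usually sufficient, since DataOutputStream.close() propagates down the chain, closing the gzip stream (which writes the trailer) and then the file stream. A hedged sketch of that pattern under the same stream layering (the file name and record format are placeholders):

    import java.io.BufferedOutputStream;
    import java.io.DataOutputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.util.zip.GZIPOutputStream;

    class ChainedClose {
        static void writeCompressedInts(int[] values) throws IOException {
            try (DataOutputStream dos = new DataOutputStream(
                    new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream("rows.tmp.gz"))))) {
                dos.writeInt(values.length); // record-count header, as in the example above
                for (int v : values) {
                    dos.writeInt(v);
                }
            } // closing dos closes the gzip stream (writing the trailer) and the file stream
        }
    }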
From source file:es.mityc.firmaJava.libreria.utilidades.Base64.java
    /**
     * Serializes an object and returns the Base64-encoded
     * version of that serialized object. If the object
     * cannot be serialized or there is another error,
     * the method will return <tt>null</tt>.
     * <p>
     * Valid options:<pre>
     *   GZIP: gzip-compresses object before encoding it.
     *   DONT_BREAK_LINES: don't break lines at 76 characters
     *     <i>Note: Technically, this makes your encoding non-compliant.</i>
     * </pre>
     * <p>
     * Example: <code>encodeObject( myObj, Base64.GZIP )</code> or
     * <p>
     * Example: <code>encodeObject( myObj, Base64.GZIP | Base64.DONT_BREAK_LINES )</code>
     *
     * @param serializableObject The object to encode
     * @param options Specified options
     * @return The Base64-encoded object
     * @see Base64#GZIP
     * @see Base64#DONT_BREAK_LINES
     * @since 2.0
     */
    public static String encodeObject(Serializable serializableObject, int options) {
        // Streams
        ByteArrayOutputStream baos = null;
        OutputStream b64os = null;
        ObjectOutputStream oos = null;
        GZIPOutputStream gzos = null;

        // Isolate options
        int gzip = (options & ConstantesXADES.GZIP);
        // int dontBreakLines = (options & DONT_BREAK_LINES);

        try {
            // ObjectOutputStream -> (GZIP) -> Base64 -> ByteArrayOutputStream
            baos = new ByteArrayOutputStream();
            b64os = new Base64.OutputStream(baos, ConstantesXADES.ENCODE | options);

            // GZip?
            if (gzip == ConstantesXADES.GZIP) {
                gzos = new GZIPOutputStream(b64os);
                oos = new ObjectOutputStream(gzos);
            } // end if: gzip
            else
                oos = new ObjectOutputStream(b64os);

            oos.writeObject(serializableObject);
        } // end try
        catch (IOException e) {
            log.error(e);
            return null;
        } // end catch
        finally {
            try { oos.close(); } catch (Exception e) { log.error(e); }
            try { gzos.close(); } catch (Exception e) { log.error(e); }
            try { b64os.close(); } catch (Exception e) { log.error(e); }
            try { baos.close(); } catch (Exception e) { log.error(e); }
        } // end finally

        // Return value according to relevant encoding.
        try {
            return new String(baos.toByteArray(), PREFERRED_ENCODING);
        } // end try
        catch (UnsupportedEncodingException uue) {
            return new String(baos.toByteArray());
        } // end catch
    }
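A similar chain can be built with the JDK's own java.util.Base64 (Java 8+) instead of the custom Base64.OutputStream above. The sketch below (class and method names are illustrative) makes the same point about close(): closing the outermost ObjectOutputStream cascades down the chain, so the gzip trailer is written and the Base64 encoder flushes its final padding before the bytes are read back:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectOutputStream;
    import java.io.OutputStream;
    import java.io.Serializable;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;
    import java.util.zip.GZIPOutputStream;

    class EncodeObjectSketch {
        // ObjectOutputStream -> GZIP -> Base64 -> ByteArrayOutputStream
        static String encodeObject(Serializable obj) throws IOException {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            OutputStream b64os = Base64.getEncoder().wrap(baos);
            try (ObjectOutputStream oos = new ObjectOutputStream(new GZIPOutputStream(b64os))) {
                oos.writeObject(obj);
            } // close() cascades: gzip trailer is written, then Base64 padding is flushed
            return new String(baos.toByteArray(), StandardCharsets.US_ASCII);
        }
    }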
From source file:org.apache.ctakes.ytex.uima.mapper.DocumentMapperServiceImpl.java
    private Document createDocument(JCas jcas, String analysisBatch, boolean bStoreDocText, boolean bStoreCAS) {
        Document doc = new Document();
        if (bStoreDocText)
            doc.setDocText(jcas.getDocumentText());
        doc.setAnalysisBatch(
                analysisBatch == null || analysisBatch.length() == 0 ? getDefaultAnalysisBatch() : analysisBatch);
        // look for the ctakes DocumentID anno
        if (setUimaDocId(jcas, doc, "org.apache.ctakes.typesystem.type.structured.DocumentID",
                "documentID") == null) {
            // look for the uima SourceDocumentInformation anno
            setUimaDocId(jcas, doc, "org.apache.uima.examples.SourceDocumentInformation", "uri");
        }
        // serialize and store the gzipped CAS if requested
        if (bStoreCAS) {
            try {
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                GZIPOutputStream zipOut = new GZIPOutputStream(out);
                XmiCasSerializer ser = new XmiCasSerializer(jcas.getTypeSystem());
                XMLSerializer xmlSer = new XMLSerializer(zipOut, false);
                ser.serialize(jcas.getCas(), xmlSer.getContentHandler());
                zipOut.close(); // flush the gzip trailer before reading out.toByteArray()
                doc.setCas(out.toByteArray());
            } catch (Exception saxException) {
                log.error("error serializing document cas", saxException);
            }
        }
        return doc;
    }
From source file:org.codehaus.mojo.webstart.JnlpMojo.java
    private void processDependency(Artifact artifact, List artifactList) throws IOException, MojoExecutionException {
        // TODO: scope handler
        // skip provided and test scopes
        if (Artifact.SCOPE_PROVIDED.equals(artifact.getScope())
                || Artifact.SCOPE_TEST.equals(artifact.getScope())) {
            return;
        }

        String type = artifact.getType();
        // skip artifacts that are not jar or nar
        // nar is how we handle native libraries
        if (!("jar".equals(type)) && !("nar".equals(type))) {
            getLog().debug("Skipping artifact of type " + type + " for " + getWorkDirectory().getName());
            return;
        }

        // FIXME when signed, we should update the manifest.
        // see http://www.mail-archive.com/turbine-maven-dev@jakarta.apache.org/msg08081.html
        // and maven1: maven-plugins/jnlp/src/main/org/apache/maven/jnlp/UpdateManifest.java
        // or shouldn't we? See MOJO-7 comment end of October.
        final File toCopy = artifact.getFile();
        if (toCopy == null) {
            getLog().error("artifact with no file: " + artifact);
            getLog().error("artifact download url: " + artifact.getDownloadUrl());
            getLog().error("artifact repository: " + artifact.getRepository());
            getLog().error("artifact version: " + artifact.getVersion());
            throw new IllegalStateException(
                    "artifact " + artifact + " has no matching file, why? Check the logs...");
        }

        // check if this artifact has the main class in it
        if (artifactContainsClass(artifact, jnlp.getMainClass())) {
            if (artifactWithMainClass == null) {
                artifactWithMainClass = artifact;
                getLog().debug("Found main jar. Artifact " + artifactWithMainClass
                        + " contains the main class: " + jnlp.getMainClass());
            } else {
                getLog().warn("artifact " + artifact + " also contains the main class: "
                        + jnlp.getMainClass() + ". IGNORED.");
            }
        }

        // Add the artifact to the list even if it is not processed
        artifactList.add(artifact);

        String outputName = getArtifactJnlpName(artifact);
        File targetDirectory = getArtifactJnlpDirFile(artifact);

        // Check if this file needs to be updated in the targetDirectory.
        // Currently this check is just based on the existence of a jar and whether
        // the modified time of the jar in the maven cache is newer or older than
        // that in the targetDirectory. It would be better to use the version of the jar.
        if (!needsUpdating(toCopy, targetDirectory, outputName)) {
            getLog().debug("skipping " + artifact + " it has already been processed");
            return;
        }

        // Instead of copying the file to its final location we make a temporary
        // folder and put the jar there. This way if something fails we won't
        // leave bad files in the final output folder.
        File tmpArtifactDirectory = getArtifactTemporaryDirFile(artifact);
        File currentJar = new File(tmpArtifactDirectory, outputName);
        FileUtils.copyFile(toCopy, currentJar);

        //
        // pack200 and jar signing
        //
        // This used to be conditional based on a sign config and a pack boolean.
        // We now just try to do these things all the time. There used to be some
        // automatic keystore generation here but we aren't using it anymore.
        // http://java.sun.com/j2se/1.5.0/docs/guide/deployment/deployment-guide/pack200.html

        // we need to pack then unpack the files before signing them
        File packedJar = new File(currentJar.getAbsolutePath() + ".pack");

        // There is no need to gzip them if we are just going to uncompress them
        // again. (gzip isn't lossy like pack is.)
        // We should handle the case where a jar cannot be packed.
        String shortName = getArtifactFlatPath(artifact) + "/" + outputName;
        getLog().info("processing: " + shortName);

        boolean doPack200 = true;

        // We should remove any previous signature information. Signatures on the file
        // mess up verification of the signature, because there end up being 2 signatures
        // in the jar. The pack code does a signature verification before packing, so
        // to be safe we want to remove any signatures before we do any packing.
        removeSignatures(currentJar, shortName);

        getLog().debug("packing : " + shortName);
        try {
            Pack200.packJar(currentJar, false);
        } catch (Exception e) {
            // It will throw an ant.BuildException if it can't pack the jar.
            // One example is with
            //   <groupId>com.ibm.icu</groupId>
            //   <artifactId>icu4j</artifactId>
            //   <version>2.6.1</version>
            // That jar has some class that causes the packing code to die trying
            // to read it in.
            // Another time it will not be able to pack the jar if it has an invalid
            // signature. That one we can fix by removing the signature.
            getLog().warn("Cannot pack: " + artifact, e);
            doPack200 = false;

            // It might have left a bad pack jar
            if (packedJar.exists()) {
                packedJar.delete();
            }
        }

        if (!doPack200) {
            // Packing is being skipped for some reason, so we need to sign
            // and verify it separately
            signJar(currentJar, shortName);
            if (!verifyJar(currentJar, shortName)) {
                // We cannot verify this jar
                throw new MojoExecutionException("failed to verify signed jar: " + shortName);
            }
        } else {
            getLog().debug("unpacking : " + shortName + ".pack");
            Pack200.unpackJar(packedJar);

            // The spec says that one should do it twice when there are unsigned jars??
            // I don't know about the unsigned part, but I found a jar
            // that had to be packed, unpacked, packed, and unpacked before
            // it could be signed and packed correctly.
            // I suppose the best way to do this would be to try the signature
            // and if it fails then pack it again, instead of packing and
            // unpacking every single jar.
            boolean verified = false;
            for (int i = 0; i < 2; i++) {
                // This might throw a mojo exception if the signature didn't
                // verify. This might happen if the jar has some previous
                // signature information.
                signJar(currentJar, shortName);

                // Now we pack and unpack the jar
                getLog().debug("packing : " + shortName);
                Pack200.packJar(currentJar, false);
                getLog().debug("unpacking : " + shortName + ".pack");
                Pack200.unpackJar(packedJar);

                // Check if the jar is signed correctly
                if (verifyJar(currentJar, shortName)) {
                    verified = true;
                    break;
                }

                // verification failed here
                getLog().info("verification failed, attempt: " + i);
            }

            if (!verified) {
                throw new MojoExecutionException("Failed to verify signature after signing, "
                        + "packing, and unpacking multiple times");
            }

            // Now we need to gzip the resulting packed jar.
            getLog().debug("gzipping: " + shortName + ".pack");
            FileInputStream inStream = new FileInputStream(packedJar);
            FileOutputStream outFileStream = new FileOutputStream(packedJar.getAbsolutePath() + ".gz");
            GZIPOutputStream outGzStream = new GZIPOutputStream(outFileStream);
            IOUtil.copy(inStream, outGzStream);
            outGzStream.close();
            outFileStream.close();

            // delete the packed jar because we only need the gz jar
            packedJar.delete();
        }

        // If we are here then it is assumed the jar has been signed, packed and verified.
        // We need to rename all the files in the temporaryDirectory so they
        // go to the targetDirectory.
        File[] tmpFiles = tmpArtifactDirectory.listFiles();
        for (int i = 0; i < tmpFiles.length; i++) {
            File targetFile = new File(targetDirectory, tmpFiles[i].getName());
            // This is better than File.renameTo because it will throw
            // an exception if something goes wrong
            FileUtils.rename(tmpFiles[i], targetFile);
        }

        tmpFiles = tmpArtifactDirectory.listFiles();
        if (tmpFiles != null && tmpFiles.length != 0) {
            throw new MojoExecutionException("Could not move files out of: " + tmpArtifactDirectory);
        }
        tmpArtifactDirectory.delete();

        getLog().debug("moved files to: " + targetDirectory);

        // make the snapshot copies if necessary
        if (jnlp.getMakeSnapshotsWithNoJNLPVersion() && artifact.isSnapshot()) {
            String jarBaseName = getArtifactJnlpBaseName(artifact);
            String snapshot_outputName = jarBaseName + "-" + artifact.getBaseVersion() + ".jar";
            File versionedFile = new File(targetDirectory, getArtifactJnlpName(artifact));

            // This method should reduce the number of times a file
            // needs to be downloaded by an applet or webstart. However,
            // it isn't very safe if multiple users are running this.
            // It compares the date of the file just set up in the jnlp
            // folder with the last generated snapshot.
            copyFileToDirectoryIfNecessary(versionedFile, targetDirectory, snapshot_outputName);
        }

        // Record that this artifact was successfully processed.
        // This might not be necessary in the future.
        this.processedJnlpArtifacts.add(new File(targetDirectory, outputName));
    }
From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java
    /**
     * Compresses the snapshot and uploads it to a bucket in objectstorage gateway as a single or multipart upload
     * based on the configuration in {@link StorageInfo}. Bucket name should be configured before invoking this
     * method. It can be looked up and initialized by {@link #prepareForUpload()} or explicitly set using
     * {@link #setBucketName(String)}
     *
     * @param sourceFileName
     *            absolute path to the snapshot on the file system
     */
    @Override
    public void upload(String sourceFileName) throws SnapshotTransferException {
        validateInput(); // Validate input
        loadTransferConfig(); // Load the transfer configuration parameters from database
        SnapshotProgressCallback progressCallback = new SnapshotProgressCallback(snapshotId); // Setup the progress callback

        Boolean error = Boolean.FALSE;
        ArrayBlockingQueue<SnapshotPart> partQueue = null;
        SnapshotPart part = null;
        SnapshotUploadInfo snapUploadInfo = null;
        Future<List<PartETag>> uploadPartsFuture = null;
        Future<String> completeUploadFuture = null;

        byte[] buffer = new byte[READ_BUFFER_SIZE];
        Long readOffset = 0L;
        Long bytesRead = 0L;
        Long bytesWritten = 0L;
        int len;
        int partNumber = 1;

        try {
            // Get the uncompressed file size for uploading as metadata
            Long uncompressedSize = getFileSize(sourceFileName);

            // Setup the snapshot and part entities.
            snapUploadInfo = SnapshotUploadInfo.create(snapshotId, bucketName, keyName);
            Path zipFilePath = Files.createTempFile(keyName + '-', '-' + String.valueOf(partNumber));
            part = SnapshotPart.createPart(snapUploadInfo, zipFilePath.toString(), partNumber, readOffset);

            FileInputStream inputStream = new FileInputStream(sourceFileName);
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            GZIPOutputStream gzipStream = new GZIPOutputStream(baos);
            FileOutputStream outputStream = new FileOutputStream(zipFilePath.toString());

            try {
                LOG.debug("Reading snapshot " + snapshotId + " and compressing it to disk in chunks of size "
                        + partSize + " bytes or greater");
                while ((len = inputStream.read(buffer)) > 0) {
                    bytesRead += len;
                    gzipStream.write(buffer, 0, len);

                    if ((bytesWritten + baos.size()) < partSize) {
                        baos.writeTo(outputStream);
                        bytesWritten += baos.size();
                        baos.reset();
                    } else {
                        gzipStream.close();
                        baos.writeTo(outputStream); // Order is important. Closing the gzip stream flushes stuff
                        bytesWritten += baos.size();
                        baos.reset();
                        outputStream.close();

                        if (partNumber > 1) { // Update the part status
                            part = part.updateStateCreated(bytesWritten, bytesRead, Boolean.FALSE);
                        } else { // Initialize multipart upload only once after the first part is created
                            LOG.info("Uploading snapshot " + snapshotId
                                    + " to objectstorage using multipart upload");
                            progressCallback.setUploadSize(uncompressedSize);
                            uploadId = initiateMulitpartUpload(uncompressedSize);
                            snapUploadInfo = snapUploadInfo.updateUploadId(uploadId);
                            part = part.updateStateCreated(uploadId, bytesWritten, bytesRead, Boolean.FALSE);
                            partQueue = new ArrayBlockingQueue<SnapshotPart>(queueSize);
                            uploadPartsFuture = Threads.enqueue(serviceConfig, UploadPartTask.class, poolSize,
                                    new UploadPartTask(partQueue, progressCallback));
                        }

                        // Check for the future task before adding part to the queue.
                        if (uploadPartsFuture != null && uploadPartsFuture.isDone()) {
                            // This task shouldn't be done until the last part is added. If it is done at
                            // this point, then something might have gone wrong
                            throw new SnapshotUploadPartException(
                                    "Error uploading parts, aborting part creation process. Check previous log messages for the exact error");
                        }

                        // Add part to the queue
                        partQueue.put(part);

                        // Prep the metadata for the next part
                        readOffset += bytesRead;
                        bytesRead = 0L;
                        bytesWritten = 0L;

                        // Setup the part entity for next part
                        zipFilePath = Files.createTempFile(keyName + '-', '-' + String.valueOf((++partNumber)));
                        part = SnapshotPart.createPart(snapUploadInfo, zipFilePath.toString(), partNumber,
                                readOffset);

                        gzipStream = new GZIPOutputStream(baos);
                        outputStream = new FileOutputStream(zipFilePath.toString());
                    }
                }

                gzipStream.close();
                baos.writeTo(outputStream);
                bytesWritten += baos.size();
                baos.reset();
                outputStream.close();
                inputStream.close();

                // Update the part status
                part = part.updateStateCreated(bytesWritten, bytesRead, Boolean.TRUE);

                // Update the snapshot upload info status
                snapUploadInfo = snapUploadInfo.updateStateCreatedParts(partNumber);
            } catch (Exception e) {
                LOG.error("Failed to upload " + snapshotId + " due to: ", e);
                error = Boolean.TRUE;
                throw new SnapshotTransferException("Failed to upload " + snapshotId + " due to: ", e);
            } finally {
                if (inputStream != null) {
                    inputStream.close();
                }
                if (gzipStream != null) {
                    gzipStream.close();
                }
                if (outputStream != null) {
                    outputStream.close();
                }
                baos.reset();
            }

            if (partNumber > 1) {
                // Check for the future task before adding the last part to the queue.
                if (uploadPartsFuture != null && uploadPartsFuture.isDone()) {
                    // This task shouldn't be done until the last part is added. If it is done at this point,
                    // then something might have gone wrong
                    throw new SnapshotUploadPartException(
                            "Error uploading parts, aborting part upload process. Check previous log messages for the exact error");
                }
                // Add the last part to the queue
                partQueue.put(part);
                // Kick off the completion task
                completeUploadFuture = Threads.enqueue(serviceConfig, CompleteMpuTask.class, poolSize,
                        new CompleteMpuTask(uploadPartsFuture, snapUploadInfo, partNumber));
            } else {
                try {
                    LOG.info("Uploading snapshot " + snapshotId
                            + " to objectstorage as a single object. Compressed size of snapshot ("
                            + bytesWritten + " bytes) is less than minimum part size (" + partSize
                            + " bytes) for multipart upload");
                    PutObjectResult putResult = uploadSnapshotAsSingleObject(zipFilePath.toString(),
                            bytesWritten, uncompressedSize, progressCallback);
                    markSnapshotAvailable();
                    try {
                        part = part.updateStateUploaded(putResult.getETag());
                        snapUploadInfo = snapUploadInfo.updateStateUploaded(putResult.getETag());
                    } catch (Exception e) {
                        LOG.debug("Failed to update status in DB for " + snapUploadInfo);
                    }
                    LOG.info("Uploaded snapshot " + snapshotId + " to objectstorage");
                } catch (Exception e) {
                    error = Boolean.TRUE;
                    LOG.error("Failed to upload snapshot " + snapshotId + " due to: ", e);
                    throw new SnapshotTransferException("Failed to upload snapshot " + snapshotId + " due to: ",
                            e);
                } finally {
                    deleteFile(zipFilePath);
                }
            }
        } catch (SnapshotTransferException e) {
            error = Boolean.TRUE;
            throw e;
        } catch (Exception e) {
            error = Boolean.TRUE;
            LOG.error("Failed to upload snapshot " + snapshotId + " due to: ", e);
            throw new SnapshotTransferException("Failed to upload snapshot " + snapshotId + " due to: ", e);
        } finally {
            if (error) {
                abortUpload(snapUploadInfo);
                if (uploadPartsFuture != null && !uploadPartsFuture.isDone()) {
                    uploadPartsFuture.cancel(true);
                }
                if (completeUploadFuture != null && !completeUploadFuture.isDone()) {
                    completeUploadFuture.cancel(true);
                }
            }
        }
    }
From source file:org.ohmage.OhmageApi.java
    private HttpResponse doHttpPost(String url, HttpEntity requestEntity, boolean gzip) {
        HttpParams params = new BasicHttpParams();
        HttpConnectionParams.setStaleCheckingEnabled(params, false);
        HttpConnectionParams.setConnectionTimeout(params, 20 * 1000);
        HttpConnectionParams.setSoTimeout(params, 20 * 1000);
        HttpConnectionParams.setSocketBufferSize(params, 8192);
        HttpClientParams.setRedirecting(params, false);

        SchemeRegistry schemeRegistry = new SchemeRegistry();
        schemeRegistry.register(new Scheme("http", PlainSocketFactory.getSocketFactory(), 80));
        schemeRegistry.register(new Scheme("https", SSLSocketFactory.getSocketFactory(), 443));

        ClientConnectionManager manager = new ThreadSafeClientConnManager(params, schemeRegistry);

        HttpClient httpClient = new DefaultHttpClient(manager, params);
        HttpPost httpPost = new HttpPost(url);

        if (gzip) {
            try {
                // Write the request entity through a gzip stream into a buffer,
                // then close the stream so the GZIP trailer is written before
                // the buffer is turned into the new request entity
                ByteArrayOutputStream baos = new ByteArrayOutputStream();
                GZIPOutputStream zipper = new GZIPOutputStream(baos);

                requestEntity.writeTo(zipper);
                zipper.close();

                ByteArrayEntity byteEntity = new ByteArrayEntity(baos.toByteArray());
                byteEntity.setContentEncoding("gzip");

                httpPost.setEntity(byteEntity);
            } catch (IOException e) {
                Log.e(TAG, "Unable to gzip entity, using unzipped entity", e);
                httpPost.setEntity(requestEntity);
            }
        } else {
            httpPost.setEntity(requestEntity);
        }

        try {
            return httpClient.execute(httpPost);
        } catch (ClientProtocolException e) {
            Log.e(TAG, "ClientProtocolException while executing httpPost", e);
            return null;
        } catch (IOException e) {
            Log.e(TAG, "IOException while executing httpPost", e);
            return null;
        }
    }