List of usage examples for the java.util.zip.GZIPOutputStream constructor
public GZIPOutputStream(OutputStream out) throws IOException
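Before the project-sourced examples below, here is a minimal sketch of the constructor on its own (the file name output.gz and the payload string are placeholders): the stream wraps any OutputStream, and closing it finishes the GZIP stream and writes the trailer.

import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPOutputStream;

public class GzipWriteExample {
    public static void main(String[] args) throws IOException {
        // Wrap a file stream; any OutputStream works (socket, byte array, ...).
        try (GZIPOutputStream gzip = new GZIPOutputStream(new FileOutputStream("output.gz"))) {
            gzip.write("hello, gzip".getBytes(StandardCharsets.UTF_8));
        } // close() finishes the GZIP stream and writes the trailer
    }
}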
From source file:com.cloudant.sync.datastore.AttachmentStreamFactory.java
/**
 * Get stream for writing attachment data to disk.
 *
 * Opens the output stream using {@link FileUtils#openOutputStream(File)}.
 *
 * Data should be written to the stream unencoded and unencrypted.
 *
 * @param file     File to write to.
 * @param encoding Encoding to use.
 * @return Stream for writing.
 * @throws IOException if there's a problem writing to disk, including issues with
 *                     encryption (bad key length and other key issues).
 */
public OutputStream getOutputStream(File file, Attachment.Encoding encoding) throws IOException {
    // First, open a stream to the raw bytes on disk.
    // Then, if we have a key, assume the file should be encrypted before writing,
    // so wrap the file stream in a stream which encrypts during writing.
    // If the attachment needs encoding, we need to encode the data before it
    // is encrypted, so wrap a stream which will encode (gzip) before passing
    // to the encryption stream, or directly to the file stream if not encrypting.
    //
    // User writes [-> Encoding Stream] [-> Encryption Stream] -> write to disk
    OutputStream os = FileUtils.openOutputStream(file);

    if (key != null) {
        try {
            // Create IV
            byte[] iv = new byte[16];
            new SecureRandom().nextBytes(iv);
            os = new EncryptedAttachmentOutputStream(os, key, iv);
        } catch (InvalidKeyException ex) {
            // Replace with an IOException as we validate the key when opening
            // the databases and the key should be the same -- it's therefore
            // not worth forcing the developer to catch something they can't
            // fix during file read; a generic IOException works better.
            throw new IOException("Bad key used to write file; check encryption key.", ex);
        } catch (InvalidAlgorithmParameterException ex) {
            // We are creating what should be a valid IV for AES, 16 bytes.
            // Therefore this shouldn't happen. Again, the developer cannot
            // fix it, so wrap in an IOException as we can't write the file.
            throw new IOException("Bad key used to write file; check encryption key.", ex);
        }
    }

    switch (encoding) {
    case Plain:
        break; // nothing to do
    case Gzip:
        os = new GZIPOutputStream(os);
    }

    return os;
}
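The decorator ordering in this method matters: the GZIP stream is the outermost wrapper, so bytes are compressed before they reach the encryption stream. The reverse order would be largely pointless, since well-encrypted data looks random and does not compress.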
From source file:gov.nih.nci.firebird.service.file.FileServiceBean.java
@Override
public FirebirdFile createFile(byte[] content, FileMetadata fileMetadata) throws IOException {
    // Note: content.length is an int, so this check can never trigger as written.
    if (content.length > (long) Integer.MAX_VALUE) {
        throw new IllegalArgumentException("file too large");
    }
    ByteArrayInputStream bin = new ByteArrayInputStream(content);
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    GZIPOutputStream zout = new GZIPOutputStream(bout);
    IOUtils.copy(bin, zout);
    // finish() writes the GZIP trailer; it must happen before the stream is
    // closed and before the compressed bytes are read back.
    zout.finish();
    zout.close();
    return createFirebirdFile(bout.toByteArray(), fileMetadata, content.length);
}
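One subtlety this example illustrates: finish() writes any pending compressed data plus the GZIP trailer without closing the underlying stream, while close() calls finish() and then closes the stream. The stream must be finished (or closed) before bout.toByteArray() is called, otherwise the returned bytes are a truncated GZIP stream.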
From source file:com.flurry.proguard.UploadMapping.java
/**
 * Create a gzipped tar archive containing the ProGuard/Native mapping files
 *
 * @param files array of mapping.txt files
 * @return the tar-gzipped archive
 */
private static File createArchive(List<File> files, String uuid) {
    try {
        File tarZippedFile = File.createTempFile("tar-zipped-file", ".tgz");
        TarArchiveOutputStream taos = new TarArchiveOutputStream(
                new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(tarZippedFile))));
        for (File file : files) {
            taos.putArchiveEntry(new TarArchiveEntry(file,
                    (uuid != null && !uuid.isEmpty() ? uuid : UUID.randomUUID()) + ".txt"));
            // Close each file's input stream as soon as its bytes are copied.
            try (FileInputStream fis = new FileInputStream(file)) {
                IOUtils.copy(fis, taos);
            }
            taos.closeArchiveEntry();
        }
        taos.finish();
        taos.close();
        return tarZippedFile;
    } catch (IOException e) {
        failWithError("IO Exception while trying to tar and zip the file.", e);
        return null;
    }
}
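A detail worth noting: the BufferedOutputStream sits between the GZIPOutputStream and the FileOutputStream, so the compressor's relatively small writes are batched before they reach the disk rather than hitting it one chunk at a time.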
From source file:packjacket.StaticUtils.java
/**
 * GZips the main log file
 *
 * @return the gzipped file
 * @throws IOException if any I/O error occurs
 */
public static File gzipLog() throws IOException {
    // Write out buffer of log file
    RunnerClass.nfh.flush();
    // Initialize log and gzip-log files
    File log = new File(RunnerClass.homedir + "pj.log");
    GZIPOutputStream out = new GZIPOutputStream(
            new FileOutputStream(new File(log.getCanonicalPath() + ".pjl")));
    FileInputStream in = new FileInputStream(log);
    // How many bytes to copy with each incremental copy of the file
    int bufferSize = 4 * 1024;
    // Buffer into which data is read from the source file
    byte[] buffer = new byte[bufferSize];
    // How many bytes read so far
    int bytesRead;
    // Runs until no bytes are left to read from the source
    while ((bytesRead = in.read(buffer)) >= 0) {
        out.write(buffer, 0, bytesRead);
    }
    // Close streams
    out.close();
    in.close();
    return new File(log.getCanonicalPath() + ".pjl");
}
From source file:grails.plugin.cache.web.PageInfo.java
/**
 * @param ungzipped the bytes to be gzipped
 * @return gzipped bytes
 */
protected byte[] gzip(byte[] ungzipped) throws IOException, AlreadyGzippedException {
    if (isGzipped(ungzipped)) {
        throw new AlreadyGzippedException("The byte[] is already gzipped. It should not be gzipped again.");
    }
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    GZIPOutputStream gzipOutputStream = new GZIPOutputStream(bytes);
    gzipOutputStream.write(ungzipped);
    gzipOutputStream.close();
    return bytes.toByteArray();
}
From source file:edu.cornell.med.icb.goby.alignments.UpgradeTo1_9_8_2.java
private void upgradeHeaderVersion(String basename) throws IOException {
    InputStream headerStream;
    try {
        headerStream = new GZIPInputStream(new FileInputStream(basename + ".header"));
    } catch (IOException e) {
        // try not compressed for compatibility with 1.4-:
        LOG.trace("falling back to legacy 1.4- uncompressed header.");
        headerStream = new FileInputStream(basename + ".header");
    }
    // accept very large header messages, since these may contain query identifiers:
    final CodedInputStream codedInput = CodedInputStream.newInstance(headerStream);
    codedInput.setSizeLimit(Integer.MAX_VALUE);

    final Alignments.AlignmentHeader header = Alignments.AlignmentHeader.parseFrom(codedInput);
    Alignments.AlignmentHeader.Builder upgradedHeader = Alignments.AlignmentHeader.newBuilder(header);
    upgradedHeader.setVersion(VersionUtils.getImplementationVersion(UpgradeTo1_9_8_2.class));
    FileUtils.moveFile(new File(basename + ".header"),
            new File(makeBackFilename(basename + ".header", ".bak")));
    GZIPOutputStream headerOutput = new GZIPOutputStream(new FileOutputStream(basename + ".header"));
    try {
        upgradedHeader.build().writeTo(headerOutput);
    } finally {
        headerOutput.close();
    }
}
From source file:com.cfelde.aws.ddb.management.TableThroughput.java
private static void storePartitionState() {
    synchronized (lock) {
        LOG.info("Storing partition state file: " + stateFile.getAbsolutePath());
        try (ObjectOutputStream oos = new ObjectOutputStream(
                new GZIPOutputStream(new FileOutputStream(stateFile)))) {
            oos.writeObject(allPartitionEstimators);
        } catch (IOException ex) {
            LOG.warn("Failed to store partition state file: " + ex.getMessage(), ex);
            stateFile.delete();
        }
    }
}
From source file:it.acubelab.smaph.SmaphUtils.java
/**
 * Compress a string with GZip.
 *
 * @param str the string.
 * @return the compressed string.
 * @throws IOException if something went wrong during compression.
 */
public static byte[] compress(String str) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    GZIPOutputStream gzip = new GZIPOutputStream(out);
    // Note: getBytes() uses the platform default charset; callers that need
    // portable output should pass an explicit charset.
    gzip.write(str.getBytes());
    gzip.close();
    return out.toByteArray();
}
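The natural counterpart when reading the data back is GZIPInputStream. The helper below is a sketch, not part of the original SmaphUtils class; it mirrors the default-charset behavior of compress above.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.GZIPInputStream;

public static String decompress(byte[] compressed) throws IOException {
    GZIPInputStream gzip = new GZIPInputStream(new ByteArrayInputStream(compressed));
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] buffer = new byte[4096];
    int n;
    // Drain the compressed stream into the byte buffer until EOF.
    while ((n = gzip.read(buffer)) != -1) {
        out.write(buffer, 0, n);
    }
    gzip.close();
    return out.toString(); // platform default charset, mirroring getBytes() above
}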
From source file:ezbake.deployer.utilities.ArtifactHelpers.java
/**
 * Append to the given ArchiveInputStream, writing to the given OutputStream, the given entries to add.
 * This will duplicate the InputStream to the Output.
 *
 * @param inputStream - archive input to append to
 * @param output      - what to copy the modified archive to
 * @param filesToAdd  - what entries to append.
 */
private static void appendFilesInTarArchive(ArchiveInputStream inputStream, OutputStream output,
        Iterable<ArtifactDataEntry> filesToAdd) throws DeploymentException {
    ArchiveStreamFactory asf = new ArchiveStreamFactory();
    try {
        HashMap<String, ArtifactDataEntry> newFiles = new HashMap<>();
        for (ArtifactDataEntry entry : filesToAdd) {
            newFiles.put(entry.getEntry().getName(), entry);
        }

        GZIPOutputStream gzs = new GZIPOutputStream(output);
        TarArchiveOutputStream aos = (TarArchiveOutputStream) asf
                .createArchiveOutputStream(ArchiveStreamFactory.TAR, gzs);
        aos.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU);

        // copy the existing entries
        ArchiveEntry nextEntry;
        while ((nextEntry = inputStream.getNextEntry()) != null) {
            // If we're passing in the same file, don't copy it into the new archive
            if (!newFiles.containsKey(nextEntry.getName())) {
                aos.putArchiveEntry(nextEntry);
                IOUtils.copy(inputStream, aos);
                aos.closeArchiveEntry();
            }
        }

        for (ArtifactDataEntry entry : filesToAdd) {
            aos.putArchiveEntry(entry.getEntry());
            IOUtils.write(entry.getData(), aos);
            aos.closeArchiveEntry();
        }

        aos.finish();
        gzs.finish();
    } catch (ArchiveException | IOException e) {
        log.error(e.getMessage(), e);
        throw new DeploymentException(e.getMessage());
    }
}
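Two design choices are visible here: same-named entries from the input archive are skipped while copying, so appending a file effectively replaces any existing entry with that name; and gzs.finish() is used instead of gzs.close(), which writes the GZIP trailer while leaving the caller-supplied OutputStream open for the caller to close.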