List of usage examples for java.util.zip.CRC32
public CRC32()
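A minimal, self-contained sketch of the basic pattern every example below shares: create a CRC32, feed it bytes with update(), read the result with getValue(), and reset() before reuse. The input bytes here are illustrative only.

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class Crc32Basics {
    public static void main(String[] args) {
        byte[] data = "hello crc32".getBytes(StandardCharsets.UTF_8);

        CRC32 crc = new CRC32();
        crc.update(data);                      // feed the whole array
        long checksum = crc.getValue();        // 32-bit result widened to long
        System.out.printf("%08x%n", checksum);

        crc.reset();                           // required before checksumming new input
        crc.update(data, 0, data.length / 2);  // or feed a sub-range
        System.out.printf("%08x%n", crc.getValue());
    }
}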
From source file:org.apache.mnemonic.collections.DurableArrayNGTest.java
@Test(enabled = true)
public void testGetSetArrayChunk() {
    DurableType gtypes[] = { DurableType.CHUNK };
    int capacity = 10;
    DurableArray<DurableChunk> array = DurableArrayFactory.create(m_act, null, gtypes, capacity, false);
    Long handler = array.getHandler();
    long chunkVal;
    Checksum chunkCheckSum = new CRC32();
    chunkCheckSum.reset();
    for (int i = 0; i < capacity; i++) {
        array.set(i, genuptChunk(m_act, chunkCheckSum, genRandSize()));
    }
    chunkVal = chunkCheckSum.getValue();
    chunkCheckSum.reset();
    for (int i = 0; i < capacity; i++) {
        DurableChunk<NonVolatileMemAllocator> dc = array.get(i);
        Assert.assertNotNull(dc);
        for (int j = 0; j < dc.getSize(); ++j) {
            byte b = unsafe.getByte(dc.get() + j);
            chunkCheckSum.update(b);
        }
    }
    Assert.assertEquals(chunkCheckSum.getValue(), chunkVal);
    chunkCheckSum.reset();
    DurableArray<DurableChunk> restoredArray = DurableArrayFactory.restore(m_act, null, gtypes, handler, false);
    for (int i = 0; i < capacity; i++) {
        DurableChunk<NonVolatileMemAllocator> dc = restoredArray.get(i);
        Assert.assertNotNull(dc);
        for (int j = 0; j < dc.getSize(); ++j) {
            byte b = unsafe.getByte(dc.get() + j);
            chunkCheckSum.update(b);
        }
    }
    Assert.assertEquals(chunkCheckSum.getValue(), chunkVal);
    chunkCheckSum.reset();
    Iterator<DurableChunk> itr = restoredArray.iterator();
    int val = 0;
    while (itr.hasNext()) {
        DurableChunk<NonVolatileMemAllocator> dc = itr.next();
        Assert.assertNotNull(dc);
        for (int j = 0; j < dc.getSize(); ++j) {
            byte b = unsafe.getByte(dc.get() + j);
            chunkCheckSum.update(b);
        }
        val++;
    }
    Assert.assertEquals(val, capacity);
    Assert.assertEquals(chunkCheckSum.getValue(), chunkVal);
    restoredArray.destroy();
}
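The test above feeds the checksum one byte at a time via update(int). A small sketch, independent of Mnemonic, showing that byte-wise updates are equivalent to a single whole-array update:

import java.util.Random;
import java.util.zip.CRC32;

public class ByteWiseCrc {
    public static void main(String[] args) {
        byte[] data = new byte[1024];
        new Random(42).nextBytes(data);

        CRC32 whole = new CRC32();
        whole.update(data);

        CRC32 byByte = new CRC32();
        for (byte b : data) {
            byByte.update(b);  // update(int) consumes only the low 8 bits
        }

        System.out.println(whole.getValue() == byByte.getValue());  // prints true
    }
}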
From source file:bobs.is.compress.sevenzip.SevenZOutputFile.java
/**
 * Finishes the addition of entries to this archive, without closing it.
 *
 * @throws IOException if archive is already closed.
 */
public void finish() throws IOException {
    if (finished) {
        throw new IOException("This archive has already been finished");
    }
    finished = true;
    final long headerPosition = file.getFilePointer();
    final ByteArrayOutputStream headerBaos = new ByteArrayOutputStream();
    final DataOutputStream header = new DataOutputStream(headerBaos);
    writeHeader(header);
    header.flush();
    final byte[] headerBytes = headerBaos.toByteArray();
    file.write(headerBytes);
    final CRC32 crc32 = new CRC32();
    // signature header
    file.seek(0);
    file.write(SevenZFile.sevenZSignature);
    // version
    file.write(0);
    file.write(2);
    // start header
    final ByteArrayOutputStream startHeaderBaos = new ByteArrayOutputStream();
    final DataOutputStream startHeaderStream = new DataOutputStream(startHeaderBaos);
    startHeaderStream.writeLong(Long.reverseBytes(headerPosition - SevenZFile.SIGNATURE_HEADER_SIZE));
    startHeaderStream.writeLong(Long.reverseBytes(0xffffFFFFL & headerBytes.length));
    crc32.reset();
    crc32.update(headerBytes);
    startHeaderStream.writeInt(Integer.reverseBytes((int) crc32.getValue()));
    startHeaderStream.flush();
    final byte[] startHeaderBytes = startHeaderBaos.toByteArray();
    crc32.reset();
    crc32.update(startHeaderBytes);
    file.writeInt(Integer.reverseBytes((int) crc32.getValue()));
    file.write(startHeaderBytes);
}
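The 7z format stores its CRCs little-endian, while DataOutputStream writes big-endian, hence the Integer.reverseBytes calls above. A minimal sketch of that trick in isolation (the payload is made up):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;
import java.util.zip.CRC32;

public class LittleEndianCrc {
    public static void main(String[] args) throws IOException {
        byte[] payload = { 1, 2, 3, 4 };
        CRC32 crc = new CRC32();
        crc.update(payload);

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baos);
        // DataOutputStream is big-endian; reversing the bytes yields little-endian on disk
        out.writeInt(Integer.reverseBytes((int) crc.getValue()));
        out.flush();

        // cross-check against ByteBuffer's explicit little-endian mode
        ByteBuffer bb = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
        bb.putInt((int) crc.getValue());
        System.out.println(Arrays.equals(baos.toByteArray(), bb.array()));  // prints true
    }
}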
From source file:PNGDecoder.java
/**
 * Public constructor of the PNGEncoder class.
 *
 * @param out output stream for PNG image format to write into
 * @param mode BW_MODE, GREYSCALE_MODE or COLOR_MODE
 */
public PNGEncoder(OutputStream out, byte mode) {
    crc = new CRC32();
    this.out = out;
    if (mode < 0 || mode > 2)
        throw new IllegalArgumentException("Unknown color mode");
    this.mode = mode;
}
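An encoder like this keeps a long-lived CRC32 because every PNG chunk ends with a CRC over its 4-byte type and its data (but not its length field). A hypothetical sketch of writing one chunk that way; the chunk name and payload are illustrative, not taken from the PNGEncoder source:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class PngChunkCrc {
    static void writeChunk(DataOutputStream out, String type, byte[] data) throws IOException {
        byte[] typeBytes = type.getBytes(StandardCharsets.US_ASCII);
        CRC32 crc = new CRC32();
        crc.update(typeBytes);   // the CRC covers the type...
        crc.update(data);        // ...and the data, but not the length field
        out.writeInt(data.length);
        out.write(typeBytes);
        out.write(data);
        out.writeInt((int) crc.getValue());
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        writeChunk(new DataOutputStream(baos), "tEXt", "demo".getBytes(StandardCharsets.US_ASCII));
        System.out.println(baos.size() + " bytes written");
    }
}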
From source file:org.apache.hadoop.raid.TestDirectoryRaidDfs.java
static public void corruptBlocksInDirectory(Configuration conf, Path srcDir, long[] crcs,
        Integer[] listBlockNumToCorrupt, FileSystem fileSys, MiniDFSCluster cluster,
        boolean validate, boolean reportBadBlocks) throws IOException {
    long[] lengths = new long[crcs.length];
    // Get all block Info;
    ArrayList<BlockInfo> blocks = new ArrayList<BlockInfo>();
    List<FileStatus> lfs = RaidNode.listDirectoryRaidFileStatus(conf, fileSys, srcDir);
    assertNotNull(lfs);
    for (int fid = 0; fid < lfs.size(); fid++) {
        FileStatus fsStat = lfs.get(fid);
        long numBlock = RaidNode.getNumBlocks(fsStat);
        for (int bid = 0; bid < numBlock; bid++) {
            blocks.add(new BlockInfo(fid, bid));
        }
        lengths[fid] = fsStat.getLen();
    }
    HashSet<Integer> affectedFiles = new HashSet<Integer>();
    HashSet<Integer> affectedBlocks = new HashSet<Integer>();
    // corrupt blocks
    for (int blockNumToCorrupt : listBlockNumToCorrupt) {
        if (blockNumToCorrupt >= blocks.size()) {
            continue;
        }
        BlockInfo bi = null;
        int blockIndex = blockNumToCorrupt;
        if (blockNumToCorrupt < 0) {
            blockIndex = blocks.size() + blockNumToCorrupt;
            if (blockIndex < 0) {
                continue;
            }
        }
        if (affectedBlocks.contains(blockIndex)) {
            continue;
        }
        affectedBlocks.add(blockIndex);
        bi = blocks.get(blockIndex);
        FileStatus srcFileFs = lfs.get(bi.fileIdx);
        Path srcFile = srcFileFs.getPath();
        LOG.info("Corrupt block " + bi.blockId + " of file " + srcFile);
        LocatedBlocks locations = RaidDFSUtil.getBlockLocations((DistributedFileSystem) fileSys,
                srcFile.toUri().getPath(), 0L, srcFileFs.getLen());
        TestRaidDfs.corruptBlock(srcFile, locations.get(bi.blockId).getBlock(),
                NUM_DATANODES, true, cluster);
        if (reportBadBlocks) {
            cluster.getNameNode().reportBadBlocks(new LocatedBlock[] { locations.get(bi.blockId) });
        }
        affectedFiles.add(bi.fileIdx);
    }
    // validate files
    if (validate) {
        DistributedRaidFileSystem raidfs = getRaidFS(fileSys, conf);
        for (Integer fid : affectedFiles) {
            FileStatus stat = lfs.get(fid);
            assertTrue(TestRaidDfs.validateFile(raidfs, stat.getPath(), lengths[fid], crcs[fid]));
            // test readFully
            byte[] filebytes = new byte[(int) stat.getLen()];
            FSDataInputStream stm = raidfs.open(stat.getPath());
            stm.readFully(0, filebytes);
            CRC32 crc = new CRC32();
            crc.update(filebytes, 0, filebytes.length);
            assertEquals(crcs[fid], crc.getValue());
        }
    }
}
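The validation step above boils down to: read the whole file, CRC it, compare against the value recorded before corruption. A stripped-down sketch of the same idea against a plain local file, without the Hadoop/RAID machinery (paths are illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.zip.CRC32;

public class FileCrcCheck {
    static long crcOf(Path p) throws IOException {
        CRC32 crc = new CRC32();
        crc.update(Files.readAllBytes(p));
        return crc.getValue();
    }

    public static void main(String[] args) throws IOException {
        Path file = Files.createTempFile("crc-demo", ".bin");
        Files.write(file, new byte[] { 10, 20, 30 });
        long expected = crcOf(file);   // record the checksum...
        // ...later, after the file may have been repaired or rewritten:
        if (crcOf(file) != expected) {
            throw new IOException("file content changed: " + file);
        }
        System.out.println("content verified");
    }
}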
From source file:org.apache.cassandra.db.commitlog.CommitLog.java
public static int recover(File[] clogs) throws IOException {
    final Set<Table> tablesRecovered = new HashSet<Table>();
    List<Future<?>> futures = new ArrayList<Future<?>>();
    byte[] bytes = new byte[4096];
    Map<Integer, AtomicInteger> invalidMutations = new HashMap<Integer, AtomicInteger>();

    // count the number of replayed mutations. We don't really care about atomicity,
    // but we need it to be a reference.
    final AtomicInteger replayedCount = new AtomicInteger();

    // compute per-CF and global replay positions
    final Map<Integer, ReplayPosition> cfPositions = new HashMap<Integer, ReplayPosition>();
    for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) {
        // it's important to call RP.gRP per-cf, before aggregating all the positions w/ the Ordering.min call
        // below: gRP will return NONE if there are no flushed sstables, which is important to have in the
        // list (otherwise we'll just start replay from the first flush position that we do have, which is not correct).
        ReplayPosition rp = ReplayPosition.getReplayPosition(cfs.getSSTables());
        cfPositions.put(cfs.metadata.cfId, rp);
    }
    final ReplayPosition globalPosition = Ordering.from(ReplayPosition.comparator).min(cfPositions.values());

    for (final File file : clogs) {
        final long segment = CommitLogSegment.idFromFilename(file.getName());
        int bufferSize = (int) Math.min(Math.max(file.length(), 1), 32 * 1024 * 1024);
        BufferedRandomAccessFile reader = new BufferedRandomAccessFile(new File(file.getAbsolutePath()), "r",
                bufferSize, true);
        assert reader.length() <= Integer.MAX_VALUE;
        try {
            int replayPosition;
            if (globalPosition.segment < segment)
                replayPosition = 0;
            else if (globalPosition.segment == segment)
                replayPosition = globalPosition.position;
            else
                replayPosition = (int) reader.length();

            if (replayPosition < 0 || replayPosition >= reader.length()) {
                // replayPosition > reader.length() can happen if some data gets flushed before it is written to the commitlog
                // (see https://issues.apache.org/jira/browse/CASSANDRA-2285)
                logger.debug("skipping replay of fully-flushed {}", file);
                continue;
            }
            reader.seek(replayPosition);
            if (logger.isDebugEnabled())
                logger.debug("Replaying " + file + " starting at " + reader.getFilePointer());

            /* read the logs, populate RowMutation and apply */
            while (!reader.isEOF()) {
                if (logger.isDebugEnabled())
                    logger.debug("Reading mutation at " + reader.getFilePointer());

                long claimedCRC32;
                Checksum checksum = new CRC32();
                int serializedSize;
                try {
                    // any of the reads may hit EOF
                    serializedSize = reader.readInt();
                    // RowMutation must be at LEAST 10 bytes:
                    // 3 each for a non-empty Table and Key (including the 2-byte length from
                    // writeUTF/writeWithShortLength) and 4 bytes for column count.
                    // This prevents the CRC from being fooled by special-case garbage in the file; see CASSANDRA-2128
                    if (serializedSize < 10)
                        break;
                    long claimedSizeChecksum = reader.readLong();
                    checksum.update(serializedSize);
                    if (checksum.getValue() != claimedSizeChecksum)
                        break; // entry wasn't synced correctly/fully. that's ok.
                    if (serializedSize > bytes.length)
                        bytes = new byte[(int) (1.2 * serializedSize)];
                    reader.readFully(bytes, 0, serializedSize);
                    claimedCRC32 = reader.readLong();
                } catch (EOFException eof) {
                    break; // last CL entry didn't get completely written. that's ok.
                }

                checksum.update(bytes, 0, serializedSize);
                if (claimedCRC32 != checksum.getValue()) {
                    // this entry must not have been fsynced. probably the rest is bad too,
                    // but just in case there is no harm in trying them (since we still read on an entry boundary)
                    continue;
                }

                /* deserialize the commit log entry */
                ByteArrayInputStream bufIn = new ByteArrayInputStream(bytes, 0, serializedSize);
                RowMutation rm = null;
                try {
                    // assuming version here. We've gone to lengths to make sure what gets written to the CL is in
                    // the current version. so do make sure the CL is drained prior to upgrading a node.
                    rm = RowMutation.serializer().deserialize(new DataInputStream(bufIn),
                            MessagingService.version_, false);
                } catch (UnserializableColumnFamilyException ex) {
                    AtomicInteger i = invalidMutations.get(ex.cfId);
                    if (i == null) {
                        i = new AtomicInteger(1);
                        invalidMutations.put(ex.cfId, i);
                    } else
                        i.incrementAndGet();
                    continue;
                }

                if (logger.isDebugEnabled())
                    logger.debug(String.format("replaying mutation for %s.%s: %s", rm.getTable(),
                            ByteBufferUtil.bytesToHex(rm.key()),
                            "{" + StringUtils.join(rm.getColumnFamilies(), ", ") + "}"));

                final long entryLocation = reader.getFilePointer();
                final RowMutation frm = rm;
                Runnable runnable = new WrappedRunnable() {
                    public void runMayThrow() throws IOException {
                        if (DatabaseDescriptor.getKSMetaData(frm.getTable()) == null)
                            return;
                        final Table table = Table.open(frm.getTable());
                        RowMutation newRm = new RowMutation(frm.getTable(), frm.key());
                        // Rebuild the row mutation, omitting column families that a) have already been flushed,
                        // b) are part of a cf that was dropped. Keep in mind that the cf.name() is suspect. do
                        // everything based on the cfid instead.
                        for (ColumnFamily columnFamily : frm.getColumnFamilies()) {
                            if (CFMetaData.getCF(columnFamily.id()) == null)
                                continue; // null means the cf has been dropped
                            ReplayPosition rp = cfPositions.get(columnFamily.id());
                            // replay if current segment is newer than last flushed one or, if it is the last known
                            // segment, if we are after the replay position
                            if (segment > rp.segment || (segment == rp.segment && entryLocation > rp.position)) {
                                newRm.add(columnFamily);
                                replayedCount.incrementAndGet();
                            }
                        }
                        if (!newRm.isEmpty()) {
                            Table.open(newRm.getTable()).apply(newRm, false);
                            tablesRecovered.add(table);
                        }
                    }
                };
                futures.add(StageManager.getStage(Stage.MUTATION).submit(runnable));
                if (futures.size() > MAX_OUTSTANDING_REPLAY_COUNT) {
                    FBUtilities.waitOnFutures(futures);
                    futures.clear();
                }
            }
        } finally {
            FileUtils.closeQuietly(reader);
            logger.info("Finished reading " + file);
        }
    }

    for (Map.Entry<Integer, AtomicInteger> entry : invalidMutations.entrySet())
        logger.info(String.format("Skipped %d mutations from unknown (probably removed) CF with id %d",
                entry.getValue().intValue(), entry.getKey()));

    // wait for all the writes to finish on the mutation stage
    FBUtilities.waitOnFutures(futures);
    logger.debug("Finished waiting on mutations from recovery");

    // flush replayed tables
    futures.clear();
    for (Table table : tablesRecovered)
        futures.addAll(table.flush());
    FBUtilities.waitOnFutures(futures);
    return replayedCount.get();
}
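The commit log framing above guards each entry twice: a CRC of the 4-byte size field, then a CRC of the payload, so a torn write is detected at either point. A simplified sketch of writing and re-reading one such frame; the framing details here are illustrative, not Cassandra's exact wire format:

import java.io.*;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

public class FramedEntry {
    public static void main(String[] args) throws IOException {
        byte[] payload = "mutation bytes".getBytes("UTF-8");

        // write: [size][crc(size)][payload][crc(size + payload)]
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baos);
        Checksum crc = new CRC32();
        out.writeInt(payload.length);
        crc.update(payload.length);   // update(int) hashes only the low byte, as in the source above
        out.writeLong(crc.getValue());
        out.write(payload);
        crc.update(payload, 0, payload.length);  // keeps accumulating; no reset between fields
        out.writeLong(crc.getValue());

        // read it back, verifying both checksums with the same sequence of updates
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
        Checksum check = new CRC32();
        int size = in.readInt();
        check.update(size);
        if (in.readLong() != check.getValue()) throw new IOException("torn size field");
        byte[] data = new byte[size];
        in.readFully(data);
        check.update(data, 0, size);
        if (in.readLong() != check.getValue()) throw new IOException("torn payload");
        System.out.println("frame ok: " + new String(data, "UTF-8"));
    }
}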
From source file:io.fabric8.maven.generator.springboot.SpringBootGenerator.java
private ZipEntry createZipEntry(File file, String fullPath) throws IOException {
    ZipEntry entry = new ZipEntry(fullPath);
    byte[] buffer = new byte[8192];
    int bytesRead = -1;
    try (InputStream is = new FileInputStream(file)) {
        CRC32 crc = new CRC32();
        int size = 0;
        while ((bytesRead = is.read(buffer)) != -1) {
            crc.update(buffer, 0, bytesRead);
            size += bytesRead;
        }
        entry.setSize(size);
        entry.setCompressedSize(size);
        entry.setCrc(crc.getValue());
        entry.setMethod(ZipEntry.STORED);
        return entry;
    }
}
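For ZipEntry.STORED entries, ZipOutputStream requires the size and CRC to be set before the entry is written, which is why the file is pre-scanned above. A minimal sketch of using such a pre-computed entry (file names and payload are illustrative):

import java.io.*;
import java.util.zip.*;

public class StoredEntryDemo {
    public static void main(String[] args) throws IOException {
        byte[] content = "uncompressed payload".getBytes("UTF-8");

        CRC32 crc = new CRC32();
        crc.update(content);

        ZipEntry entry = new ZipEntry("payload.txt");
        entry.setMethod(ZipEntry.STORED);
        entry.setSize(content.length);
        entry.setCompressedSize(content.length); // STORED: compressed == uncompressed
        entry.setCrc(crc.getValue());            // must be set up front for STORED

        try (ZipOutputStream zos = new ZipOutputStream(new FileOutputStream("demo.zip"))) {
            zos.putNextEntry(entry);  // throws ZipException if size/CRC are missing
            zos.write(content);
            zos.closeEntry();
        }
    }
}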
From source file:com.aol.advertising.qiao.util.CommonUtils.java
public static long checksum(RandomAccessFile raFile, int numBytes)
        throws IOException, InsufficientFileLengthException {
    CRC32 _crc = new CRC32();
    long pos = raFile.getFilePointer();
    try {
        byte[] buffer = new byte[numBytes];
        raFile.seek(0);
        int n = raFile.read(buffer);
        if (n < numBytes) {
            String s;
            logger.warn(s = ("not enough data for checksum: current file size=" + n));
            throw new InsufficientFileLengthException(s);
        }
        synchronized (_crc) {
            _crc.reset();
            _crc.update(buffer);
            return _crc.getValue();
        }
    } finally {
        raFile.seek(pos);
    }
}
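Checksumming a fixed-size prefix like this is a common way to fingerprint a file so it can be recognized after a rename (e.g., rotated logs). A hedged sketch of the same idea without the project-specific InsufficientFileLengthException:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.zip.CRC32;

public class FilePrefixFingerprint {
    static long prefixCrc(RandomAccessFile raf, int numBytes) throws IOException {
        byte[] buffer = new byte[numBytes];
        long pos = raf.getFilePointer();
        try {
            raf.seek(0);
            raf.readFully(buffer);   // throws EOFException if the file is shorter than numBytes
            CRC32 crc = new CRC32();
            crc.update(buffer);
            return crc.getValue();
        } finally {
            raf.seek(pos);           // restore the caller's read position
        }
    }
}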
From source file:com.example.google.play.apkx.SampleDownloaderActivity.java
/**
 * Go through each of the Expansion APK files and open each as a zip file.
 * Calculate the CRC for each file and return false if any fail to match.
 *
 * @return true if XAPKZipFile is successful
 */
void validateXAPKZipFiles() {
    AsyncTask<Object, DownloadProgressInfo, Boolean> validationTask =
            new AsyncTask<Object, DownloadProgressInfo, Boolean>() {

        @Override
        protected void onPreExecute() {
            mDashboard.setVisibility(View.VISIBLE);
            mCellMessage.setVisibility(View.GONE);
            mStatusText.setText(R.string.text_verifying_download);
            mPauseButton.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View view) {
                    mCancelValidation = true;
                }
            });
            mPauseButton.setText(R.string.text_button_cancel_verify);
            super.onPreExecute();
        }

        @Override
        protected Boolean doInBackground(Object... params) {
            for (XAPKFile xf : xAPKS) {
                String fileName = Helpers.getExpansionAPKFileName(SampleDownloaderActivity.this,
                        xf.mIsMain, xf.mFileVersion);
                if (!Helpers.doesFileExist(SampleDownloaderActivity.this, fileName, xf.mFileSize, false))
                    return false;
                fileName = Helpers.generateSaveFileName(SampleDownloaderActivity.this, fileName);
                ZipResourceFile zrf;
                byte[] buf = new byte[1024 * 256];
                try {
                    zrf = new ZipResourceFile(fileName);
                    ZipEntryRO[] entries = zrf.getAllEntries();
                    /**
                     * First calculate the total compressed length
                     */
                    long totalCompressedLength = 0;
                    for (ZipEntryRO entry : entries) {
                        totalCompressedLength += entry.mCompressedLength;
                    }
                    float averageVerifySpeed = 0;
                    long totalBytesRemaining = totalCompressedLength;
                    long timeRemaining;
                    /**
                     * Then calculate a CRC for every file in the Zip file, comparing it to what is
                     * stored in the Zip directory. Note that for compressed Zip files we must
                     * extract the contents to do this comparison.
                     */
                    for (ZipEntryRO entry : entries) {
                        if (-1 != entry.mCRC32) {
                            long length = entry.mUncompressedLength;
                            CRC32 crc = new CRC32();
                            DataInputStream dis = null;
                            try {
                                dis = new DataInputStream(zrf.getInputStream(entry.mFileName));
                                long startTime = SystemClock.uptimeMillis();
                                while (length > 0) {
                                    int seek = (int) (length > buf.length ? buf.length : length);
                                    dis.readFully(buf, 0, seek);
                                    crc.update(buf, 0, seek);
                                    length -= seek;
                                    long currentTime = SystemClock.uptimeMillis();
                                    long timePassed = currentTime - startTime;
                                    if (timePassed > 0) {
                                        float currentSpeedSample = (float) seek / (float) timePassed;
                                        if (0 != averageVerifySpeed) {
                                            averageVerifySpeed = SMOOTHING_FACTOR * currentSpeedSample
                                                    + (1 - SMOOTHING_FACTOR) * averageVerifySpeed;
                                        } else {
                                            averageVerifySpeed = currentSpeedSample;
                                        }
                                        totalBytesRemaining -= seek;
                                        timeRemaining = (long) (totalBytesRemaining / averageVerifySpeed);
                                        this.publishProgress(new DownloadProgressInfo(totalCompressedLength,
                                                totalCompressedLength - totalBytesRemaining, timeRemaining,
                                                averageVerifySpeed));
                                    }
                                    startTime = currentTime;
                                    if (mCancelValidation)
                                        return true;
                                }
                                if (crc.getValue() != entry.mCRC32) {
                                    Log.e(Constants.TAG, "CRC does not match for entry: " + entry.mFileName);
                                    Log.e(Constants.TAG, "In file: " + entry.getZipFileName());
                                    return false;
                                }
                            } finally {
                                if (null != dis) {
                                    dis.close();
                                }
                            }
                        }
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                    return false;
                }
            }
            return true;
        }

        @Override
        protected void onProgressUpdate(DownloadProgressInfo... values) {
            onDownloadProgress(values[0]);
            super.onProgressUpdate(values);
        }

        @Override
        protected void onPostExecute(Boolean result) {
            if (result) {
                mDashboard.setVisibility(View.VISIBLE);
                mCellMessage.setVisibility(View.GONE);
                mStatusText.setText(R.string.text_validation_complete);
                mPauseButton.setOnClickListener(new View.OnClickListener() {
                    @Override
                    public void onClick(View view) {
                        startMovie();
                    }
                });
                mPauseButton.setText(android.R.string.ok);
            } else {
                mDashboard.setVisibility(View.VISIBLE);
                mCellMessage.setVisibility(View.GONE);
                mStatusText.setText(R.string.text_validation_failed);
                mPauseButton.setOnClickListener(new View.OnClickListener() {
                    @Override
                    public void onClick(View view) {
                        finish();
                    }
                });
                mPauseButton.setText(android.R.string.cancel);
            }
            super.onPostExecute(result);
        }
    };
    validationTask.execute(new Object());
}
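Outside Android's ZipResourceFile helper, plain java.util.zip can do the same verification: every ZipEntry carries the CRC recorded in the archive's central directory, and CheckedInputStream recomputes it while you stream the entry. A minimal sketch (the archive path is illustrative):

import java.io.IOException;
import java.io.InputStream;
import java.util.Enumeration;
import java.util.zip.*;

public class ZipCrcVerifier {
    public static boolean verify(String zipPath) throws IOException {
        try (ZipFile zip = new ZipFile(zipPath)) {
            Enumeration<? extends ZipEntry> entries = zip.entries();
            byte[] buf = new byte[8192];
            while (entries.hasMoreElements()) {
                ZipEntry entry = entries.nextElement();
                if (entry.isDirectory() || entry.getCrc() == -1)
                    continue;  // getCrc() is -1 when the directory did not record one
                CRC32 crc = new CRC32();
                try (InputStream in = new CheckedInputStream(zip.getInputStream(entry), crc)) {
                    while (in.read(buf) != -1) {
                        // reading through CheckedInputStream feeds the CRC as a side effect
                    }
                }
                if (crc.getValue() != entry.getCrc())
                    return false;  // stored CRC and recomputed CRC disagree
            }
        }
        return true;
    }
}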
From source file:org.klco.email2html.OutputWriter.java
/**
 * Writes the attachment contained in the body part to a file.
 *
 * @param containingMessage
 *            the message this body part is contained within
 * @param part
 *            the part containing the attachment
 * @return the file that was created/written to
 * @throws IOException
 *             Signals that an I/O exception has occurred.
 * @throws MessagingException
 *             the messaging exception
 */
public boolean writeAttachment(EmailMessage containingMessage, Part part)
        throws IOException, MessagingException {
    log.trace("writeAttachment");
    File attachmentFolder;
    File attachmentFile;
    InputStream in = null;
    OutputStream out = null;
    try {
        attachmentFolder = new File(outputDir.getAbsolutePath() + File.separator + config.getImagesSubDir()
                + File.separator + FILE_DATE_FORMAT.format(containingMessage.getSentDate()));
        if (!attachmentFolder.exists()) {
            log.debug("Creating attachment folder");
            attachmentFolder.mkdirs();
        }
        attachmentFile = new File(attachmentFolder, part.getFileName());
        log.debug("Writing attachment file: {}", attachmentFile.getAbsolutePath());
        if (!attachmentFile.exists()) {
            attachmentFile.createNewFile();
        }
        in = new BufferedInputStream(part.getInputStream());
        out = new BufferedOutputStream(new FileOutputStream(attachmentFile));
        log.debug("Downloading attachment");
        CRC32 checksum = new CRC32();
        for (int b = in.read(); b != -1; b = in.read()) {
            checksum.update(b);
            out.write(b);
        }
        if (this.excludeDuplicates) {
            log.debug("Computing checksum");
            long value = checksum.getValue();
            if (this.attachmentChecksums.contains(value)) {
                log.info("Skipping duplicate attachment: {}", part.getFileName());
                attachmentFile.delete();
                return false;
            } else {
                attachmentChecksums.add(value);
            }
        }
        log.debug("Attachement saved");
    } finally {
        IOUtils.closeQuietly(out);
        IOUtils.closeQuietly(in);
    }
    if (part.getContentType().toLowerCase().startsWith("image")) {
        log.debug("Creating renditions");
        String contentType = part.getContentType().substring(0, part.getContentType().indexOf(";"));
        log.debug("Creating renditions of type: " + contentType);
        for (Rendition rendition : renditions) {
            File renditionFile = new File(attachmentFolder, rendition.getName() + "-" + part.getFileName());
            try {
                if (!renditionFile.exists()) {
                    renditionFile.createNewFile();
                }
                log.debug("Creating rendition file: {}", renditionFile.getAbsolutePath());
                createRendition(attachmentFile, renditionFile, rendition);
                log.debug("Rendition created");
            } catch (OutOfMemoryError oome) {
                Runtime rt = Runtime.getRuntime();
                rt.gc();
                log.warn("Ran out of memory creating rendition: " + rendition, oome);
                log.warn("Free Memory: {}", rt.freeMemory());
                log.warn("Max Memory: {}", rt.maxMemory());
                log.warn("Total Memory: {}", rt.totalMemory());
                String[] command = null;
                if (rendition.getFill()) {
                    command = new String[] { "convert", attachmentFile.getAbsolutePath(), "-resize",
                            (rendition.getHeight() * 2) + "x", "-resize",
                            "'x" + (rendition.getHeight() * 2) + "<'", "-resize", "50%", "-gravity", "center",
                            "-crop", rendition.getHeight() + "x" + rendition.getWidth() + "+0+0", "+repage",
                            renditionFile.getAbsolutePath() };
                } else {
                    command = new String[] { "convert", attachmentFile.getAbsolutePath(), "-resize",
                            rendition.getHeight() + "x" + rendition.getWidth(),
                            renditionFile.getAbsolutePath() };
                }
                log.debug("Trying to resize with ImageMagick: " + StringUtils.join(command, " "));
                rt.exec(command);
            } catch (Exception t) {
                log.warn("Exception creating rendition: " + rendition, t);
            }
        }
    }
    return true;
}
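The copy loop above checksums one byte at a time; java.util.zip's CheckedOutputStream can fold the same bookkeeping into a buffered copy. A hedged sketch of duplicate detection built that way (the class, file names, and checksum set are illustrative, not the OutputWriter API):

import java.io.*;
import java.util.HashSet;
import java.util.Set;
import java.util.zip.CRC32;
import java.util.zip.CheckedOutputStream;

public class DedupingCopier {
    private final Set<Long> seenChecksums = new HashSet<>();

    /** Copies in to target; deletes target and returns false if its CRC was seen before. */
    public boolean copyUnlessDuplicate(InputStream in, File target) throws IOException {
        CRC32 crc = new CRC32();
        try (OutputStream out = new CheckedOutputStream(
                new BufferedOutputStream(new FileOutputStream(target)), crc)) {
            byte[] buf = new byte[8192];
            for (int n; (n = in.read(buf)) != -1; ) {
                out.write(buf, 0, n);   // CheckedOutputStream updates crc as bytes pass through
            }
        }
        if (!seenChecksums.add(crc.getValue())) {  // add() returns false if the value was present
            target.delete();
            return false;
        }
        return true;
    }
}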
From source file:com.sastix.cms.server.services.cache.hazelcast.HazelcastCacheService.java
@Override
public String getUID(String region) {
    CRC32 CRC_32 = new CRC32();
    LOG.info("HazelcastCacheService->GET_UID");
    IdGenerator idGenerator = cm.getIdGenerator(region);
    final String uid = String.valueOf(idGenerator.newId()); // assures uniqueness during the life cycle of the cluster
    final String uuid = UUID.randomUUID().toString();
    String ret = new StringBuilder(uuid).append(region).append(uid).toString();
    CRC_32.reset();
    CRC_32.update(ret.getBytes());
    return Long.toHexString(CRC_32.getValue());
}
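This last example uses CRC32 as a cheap way to shorten a composite string into an at-most-8-hex-digit token. A standalone sketch of the same idea; note that CRC32 is not collision-resistant, so this only works when, as above, the input already embeds a unique ID:

import java.nio.charset.StandardCharsets;
import java.util.UUID;
import java.util.zip.CRC32;

public class ShortToken {
    static String token(String input) {
        CRC32 crc = new CRC32();
        crc.update(input.getBytes(StandardCharsets.UTF_8)); // an explicit charset is safer than getBytes()
        return Long.toHexString(crc.getValue());            // at most 8 hex digits
    }

    public static void main(String[] args) {
        String composite = UUID.randomUUID() + "my-region" + "12345";
        System.out.println(token(composite));
    }
}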