List of usage examples for java.util.zip.CRC32.reset()
Method signature: @Override public void reset() — resets the CRC-32 to its initial value.
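Before the longer examples below, a minimal sketch of the method itself: reset() returns an existing CRC32 instance to its initial state so the same object can be reused to compute a new, independent checksum. The class and input values here are made up for illustration.

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class Crc32ResetExample {
    public static void main(String[] args) {
        byte[] first = "hello".getBytes(StandardCharsets.UTF_8);
        byte[] second = "world".getBytes(StandardCharsets.UTF_8);

        CRC32 crc = new CRC32();
        crc.update(first);
        long firstChecksum = crc.getValue();   // CRC-32 of "hello"

        // reset() clears the accumulated state so the instance can be reused
        // for unrelated data without allocating a new CRC32.
        crc.reset();
        crc.update(second);
        long secondChecksum = crc.getValue();  // CRC-32 of "world", independent of "hello"

        System.out.println(firstChecksum + " " + secondChecksum);
    }
}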
From source file: org.apache.hadoop.raid.XORDecoder.java
@Override
protected long fixErasedBlockImpl(FileSystem fs, Path srcFile, FileSystem parityFs, Path parityFile,
        boolean fixSource, long blockSize, long errorOffset, long limit, boolean partial, OutputStream out,
        Context context, CRC32 crc, StripeInfo si, boolean recoverFromStripeStore, Block lostBlock)
        throws IOException {
    Progressable reporter = context;
    if (reporter == null) {
        reporter = RaidUtils.NULL_PROGRESSABLE;
    }
    if (partial) {
        throw new IOException("We don't support partial reconstruction");
    }
    LOG.info("Fixing block at " + srcFile + ":" + errorOffset + ", limit " + limit);
    if (crc != null) {
        crc.reset();
    }
    FileStatus srcStat = fs.getFileStatus(srcFile);
    FSDataInputStream[] inputs = new FSDataInputStream[stripeSize + this.codec.parityLength];
    try {
        long errorBlockOffset = (errorOffset / blockSize) * blockSize;
        long[] srcOffsets = stripeOffsets(errorOffset, blockSize, fixSource);
        for (int i = 0; i < srcOffsets.length; i++) {
            if (fixSource && srcOffsets[i] == errorBlockOffset) {
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
                LOG.info("Using zeros at " + srcFile + ":" + errorBlockOffset);
                continue;
            }
            if (srcOffsets[i] < srcStat.getLen()) {
                FSDataInputStream in = fs.open(srcFile);
                in.seek(srcOffsets[i]);
                inputs[i] = in;
            } else {
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
                LOG.info("Using zeros at " + srcFile + ":" + errorBlockOffset);
            }
        }
        if (fixSource) {
            FSDataInputStream parityFileIn = parityFs.open(parityFile);
            parityFileIn.seek(parityOffset(errorOffset, blockSize));
            inputs[inputs.length - 1] = parityFileIn;
        } else {
            inputs[inputs.length - 1] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
            LOG.info("Using zeros at " + parityFile + ":" + errorBlockOffset);
        }
    } catch (IOException e) {
        RaidUtils.closeStreams(inputs);
        throw e;
    }
    int boundedBufferCapacity = 1;
    ParallelStreamReader parallelReader = new ParallelStreamReader(reporter, inputs, bufSize, parallelism,
            boundedBufferCapacity, blockSize);
    parallelReader.start();
    try {
        // Loop while the number of skipped + written bytes is less than the max.
        long written;
        for (written = 0; written < limit;) {
            ParallelStreamReader.ReadResult readResult;
            try {
                readResult = parallelReader.getReadResult();
            } catch (InterruptedException e) {
                throw new IOException("Interrupted while waiting for read result");
            }
            // Cannot tolerate any IO errors.
            IOException readEx = readResult.getException();
            if (readEx != null) {
                throw readEx;
            }
            int toWrite = (int) Math.min((long) bufSize, limit - written);
            XOREncoder.xor(readResult.readBufs, writeBufs[0]);
            out.write(writeBufs[0], 0, toWrite);
            if (crc != null) {
                crc.update(writeBufs[0], 0, toWrite);
            }
            written += toWrite;
        }
        return written;
    } finally {
        // Inputs will be closed by parallelReader.shutdown().
        parallelReader.shutdown();
    }
}
From source file: org.kuali.kfs.module.ar.document.service.impl.DunningLetterServiceImpl.java
/**
 * This method generates the actual pdf files to print.
 *
 * @param mapping
 * @param form
 * @param list
 * @return
 */
@Override
public boolean createZipOfPDFs(byte[] report, ByteArrayOutputStream baos) throws IOException {
    ZipOutputStream zos = new ZipOutputStream(baos);
    int bytesRead;
    byte[] buffer = new byte[1024];
    CRC32 crc = new CRC32();
    if (ObjectUtils.isNotNull(report)) {
        BufferedInputStream bis = new BufferedInputStream(new ByteArrayInputStream(report));
        crc.reset();
        while ((bytesRead = bis.read(buffer)) != -1) {
            crc.update(buffer, 0, bytesRead);
        }
        bis.close();
        // Reset to beginning of input stream
        bis = new BufferedInputStream(new ByteArrayInputStream(report));
        ZipEntry entry = new ZipEntry("DunningLetters&Invoices-"
                + getDateTimeService().toDateStringForFilename(getDateTimeService().getCurrentDate()) + ".pdf");
        entry.setMethod(ZipEntry.STORED);
        entry.setCompressedSize(report.length);
        entry.setSize(report.length);
        entry.setCrc(crc.getValue());
        zos.putNextEntry(entry);
        while ((bytesRead = bis.read(buffer)) != -1) {
            zos.write(buffer, 0, bytesRead);
        }
        bis.close();
    }
    zos.close();
    return true;
}
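The example above reads the data twice because ZipOutputStream rejects a STORED (uncompressed) entry whose size and CRC-32 are not set before putNextEntry. A condensed sketch of that pattern follows; the class, method, and entry names are made up for illustration.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

public class StoredEntryExample {
    // Writes 'data' into 'zos' as an uncompressed (STORED) entry named 'name'.
    // STORED entries require size, compressed size and CRC-32 up front.
    static void addStoredEntry(ZipOutputStream zos, String name, byte[] data) throws IOException {
        CRC32 crc = new CRC32();
        crc.reset();              // harmless on a fresh instance; required when the CRC32 is reused
        crc.update(data);

        ZipEntry entry = new ZipEntry(name);
        entry.setMethod(ZipEntry.STORED);
        entry.setSize(data.length);
        entry.setCompressedSize(data.length);
        entry.setCrc(crc.getValue());

        zos.putNextEntry(entry);
        zos.write(data);
        zos.closeEntry();
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (ZipOutputStream zos = new ZipOutputStream(baos)) {
            addStoredEntry(zos, "report.pdf", new byte[] { 1, 2, 3 });
        }
        System.out.println("zip size: " + baos.size());
    }
}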
From source file: io.hops.erasure_coding.Decoder.java
long fixErasedBlockImpl(FileSystem srcFs, Path srcFile, FileSystem parityFs, Path parityFile,
        boolean fixSource, long blockSize, long errorOffset, long limit, boolean partial, OutputStream out,
        Progressable reporter, CRC32 crc) throws IOException {
    long startTime = System.currentTimeMillis();
    if (crc != null) {
        crc.reset();
    }
    int blockIdx = (int) (errorOffset / blockSize);
    LocationPair lp = null;
    int erasedLocationToFix;
    if (fixSource) {
        lp = StripeReader.getBlockLocation(codec, blockIdx);
        erasedLocationToFix = codec.parityLength + lp.getBlockIdxInStripe();
    } else {
        lp = StripeReader.getParityBlockLocation(codec, blockIdx);
        erasedLocationToFix = lp.getBlockIdxInStripe();
    }
    FileStatus srcStat = srcFs.getFileStatus(srcFile);
    FileStatus parityStat = parityFs.getFileStatus(parityFile);
    InputStream[] inputs = null;
    List<Integer> erasedLocations = new ArrayList<Integer>();
    // Start off with one erased location.
    erasedLocations.add(erasedLocationToFix);
    List<Integer> locationsToRead = new ArrayList<Integer>(codec.parityLength + codec.stripeLength);
    int boundedBufferCapacity = 2;
    ParallelStreamReader parallelReader = null;
    LOG.info("Need to write " + limit + " bytes for erased location index " + erasedLocationToFix);
    long startOffsetInBlock = 0;
    if (partial) {
        startOffsetInBlock = errorOffset % blockSize;
    }
    // will be resized later
    int[] erasedLocationsArray = new int[0];
    int[] locationsToReadArray = new int[0];
    int[] locationsNotToReadArray = new int[0];
    try {
        numReadBytes = 0;
        long written;
        // Loop while the number of written bytes is less than the max.
        for (written = 0; written < limit;) {
            try {
                if (parallelReader == null) {
                    long offsetInBlock = written + startOffsetInBlock;
                    StripeReader sReader = StripeReader.getStripeReader(codec, conf, blockSize, srcFs,
                            lp.getStripeIdx(), srcStat);
                    inputs = sReader.buildInputs(srcFs, srcFile, srcStat, parityFs, parityFile, parityStat,
                            lp.getStripeIdx(), offsetInBlock, erasedLocations, locationsToRead, code);
                    /*
                     * locationsToRead have now been populated and erasedLocations
                     * might have been updated with more erased locations.
                     */
                    LOG.info("Erased locations: " + erasedLocations.toString()
                            + "\nLocations to Read for repair:" + locationsToRead.toString());
                    /*
                     * Initialize erasedLocationsArray with erasedLocations.
                     */
                    int i = 0;
                    erasedLocationsArray = new int[erasedLocations.size()];
                    for (int loc = 0; loc < codec.stripeLength + codec.parityLength; loc++) {
                        if (erasedLocations.indexOf(loc) >= 0) {
                            erasedLocationsArray[i] = loc;
                            i++;
                        }
                    }
                    /*
                     * Initialize locationsToReadArray with locationsToRead.
                     */
                    i = 0;
                    locationsToReadArray = new int[locationsToRead.size()];
                    for (int loc = 0; loc < codec.stripeLength + codec.parityLength; loc++) {
                        if (locationsToRead.indexOf(loc) >= 0) {
                            locationsToReadArray[i] = loc;
                            i++;
                        }
                    }
                    i = 0;
                    locationsNotToReadArray = new int[codec.stripeLength + codec.parityLength
                            - locationsToRead.size()];
                    for (int loc = 0; loc < codec.stripeLength + codec.parityLength; loc++) {
                        if (locationsToRead.indexOf(loc) == -1 || erasedLocations.indexOf(loc) != -1) {
                            locationsNotToReadArray[i] = loc;
                            i++;
                        }
                    }
                    this.writeBufs = new byte[erasedLocations.size()][];
                    allocateBuffers();
                    assert (parallelReader == null);
                    parallelReader = new ParallelStreamReader(reporter, inputs, (int) Math.min(bufSize, limit),
                            parallelism, boundedBufferCapacity, Math.min(limit, blockSize));
                    parallelReader.start();
                }
                ParallelStreamReader.ReadResult readResult = readFromInputs(erasedLocations, limit, reporter,
                        parallelReader);
                code.decodeBulk(readResult.readBufs, writeBufs, erasedLocationsArray, locationsToReadArray,
                        locationsNotToReadArray);
                // get the number of bytes read through hdfs.
                for (int readNum : readResult.numRead) {
                    numReadBytes += readNum;
                }
                int toWrite = (int) Math.min((long) bufSize, limit - written);
                for (int i = 0; i < erasedLocationsArray.length; i++) {
                    if (erasedLocationsArray[i] == erasedLocationToFix) {
                        if (out != null) {
                            out.write(writeBufs[i], 0, toWrite);
                        }
                        if (crc != null) {
                            crc.update(writeBufs[i], 0, toWrite);
                        }
                        written += toWrite;
                        break;
                    }
                }
            } catch (IOException e) {
                if (e instanceof TooManyErasedLocations) {
                    logRaidReconstructionMetrics("FAILURE", 0, codec, System.currentTimeMillis() - startTime,
                            erasedLocations.size(), numReadBytes, srcFile, errorOffset,
                            LOGTYPES.OFFLINE_RECONSTRUCTION, srcFs);
                    throw e;
                }
                // Re-create inputs from the new erased locations.
                if (parallelReader != null) {
                    parallelReader.shutdown();
                    parallelReader = null;
                }
                RaidUtils.closeStreams(inputs);
            }
        }
        logRaidReconstructionMetrics("SUCCESS", written, codec, System.currentTimeMillis() - startTime,
                erasedLocations.size(), numReadBytes, srcFile, errorOffset, LOGTYPES.OFFLINE_RECONSTRUCTION,
                srcFs);
        return written;
    } finally {
        numMissingBlocksInStripe = erasedLocations.size();
        if (parallelReader != null) {
            parallelReader.shutdown();
        }
        RaidUtils.closeStreams(inputs);
    }
}
From source file: org.tangram.components.CodeExporter.java
@LinkAction("/codes.zip")
public TargetDescriptor codes(HttpServletRequest request, HttpServletResponse response) throws IOException {
    if (!request.getRequestURI().endsWith(".zip")) {
        response.sendError(HttpServletResponse.SC_NOT_FOUND);
        return null;
    } // if
    if (request.getAttribute(Constants.ATTRIBUTE_ADMIN_USER) == null) {
        throw new IOException("User may not execute action");
    } // if
    long now = System.currentTimeMillis();
    response.setContentType("application/x-zip-compressed");
    CRC32 crc = new CRC32();
    ZipOutputStream zos = new ZipOutputStream(response.getOutputStream());
    zos.setComment("Tangram Repository Codes");
    zos.setLevel(9);
    Collection<CodeResource> codes = codeResourceCache.getCodes();
    for (CodeResource code : codes) {
        if (StringUtils.isNotBlank(code.getAnnotation())) {
            String mimeType = CodeHelper.getNormalizedMimeType(code.getMimeType());
            String folder = CodeHelper.getFolder(mimeType);
            String extension = CodeHelper.getExtension(mimeType);
            if (mimeType.startsWith("text/")) {
                byte[] bytes = code.getCodeText().getBytes("UTF-8");
                ZipEntry ze = new ZipEntry(folder + "/" + getFilename(code) + extension);
                ze.setTime(now);
                crc.reset();
                crc.update(bytes);
                ze.setCrc(crc.getValue());
                zos.putNextEntry(ze);
                zos.write(bytes);
                zos.closeEntry();
            } // if
        } // if
    } // for
    zos.finish();
    zos.close();
    return TargetDescriptor.DONE;
}
From source file: com.zimbra.cs.zimlet.ZimletUtil.java
private static long computeCRC32(File file) throws IOException {
    byte buf[] = new byte[32 * 1024];
    CRC32 crc = new CRC32();
    crc.reset();
    FileInputStream fis = null;
    try {
        fis = new FileInputStream(file);
        int bytesRead;
        while ((bytesRead = fis.read(buf)) != -1) {
            crc.update(buf, 0, bytesRead);
        }
        return crc.getValue();
    } finally {
        if (fis != null) {
            try {
                fis.close();
            } catch (IOException e) {
            }
        }
    }
}
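In this example the reset() call is redundant, since a newly constructed CRC32 already starts from its initial value; reset() only matters when the instance is reused. A possible alternative sketch that lets java.util.zip.CheckedInputStream accumulate the checksum as the file is streamed (the class name and helper method are illustrative, not part of the original source):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.CRC32;
import java.util.zip.CheckedInputStream;

public class FileCrc32Example {
    // Returns the CRC-32 of the whole file; the CheckedInputStream updates
    // the CRC32 as a side effect of every read.
    static long computeCrc32(String path) throws IOException {
        CRC32 crc = new CRC32();
        try (InputStream in = new CheckedInputStream(new FileInputStream(path), crc)) {
            byte[] buf = new byte[32 * 1024];
            while (in.read(buf) != -1) {
                // reading is enough; the wrapper maintains the checksum
            }
        }
        return crc.getValue();
    }

    public static void main(String[] args) throws IOException {
        System.out.println(Long.toHexString(computeCrc32(args[0])));
    }
}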
From source file: org.apache.hadoop.raid.TestBlockCopier.java
private long[] createRandomFile(Path file, int repl, int numBlocks) throws IOException {
    long[] crcs = new long[numBlocks];
    CRC32 crc = new CRC32();
    Random rand = new Random();
    FSDataOutputStream stm = fileSys.create(file, true, fileSys.getConf().getInt("io.file.buffer.size", 4096),
            (short) repl, BLOCK_SIZE);
    // Write whole blocks.
    byte[] b = new byte[(int) BLOCK_SIZE];
    for (int i = 1; i < numBlocks; i++) {
        rand.nextBytes(b);
        stm.write(b);
        crc.update(b);
        crcs[i - 1] = crc.getValue();
        crc.reset();
    }
    // Write partial block.
    b = new byte[(int) BLOCK_SIZE / 2 - 1];
    rand.nextBytes(b);
    stm.write(b);
    crc.update(b);
    crcs[crcs.length - 1] = crc.getValue();
    stm.close();
    return crcs; //crc.getValue();
}
From source file: org.apache.isis.objectstore.nosql.db.file.server.FileServer.java
private void syncConnection(final Socket connection, final int readTimeout) {
    try {
        final CRC32 crc32 = new CRC32();
        final DataOutput output = new DataOutputStream(connection.getOutputStream());
        final DataInput input = new DataInputStream(new CheckedInputStream(connection.getInputStream(), crc32));
        if (input.readByte() != INIT) {
            return;
        }
        final LogRange logFileRange = Util.logFileRange();
        final long lastId = logFileRange.noLogFile() ? -1 : logFileRange.getLast();
        output.writeLong(lastId);
        do {
            if (input.readByte() != RECOVERY_LOG) {
                return;
            }
            crc32.reset();
            final long logId = input.readLong();
            final File file = Util.tmpLogFile(logId);
            LOG.info("syncing recovery file: " + file.getName());
            final BufferedOutputStream fileOutput = new BufferedOutputStream(new FileOutputStream(file));
            final byte[] buffer = new byte[8092];
            int length;
            while ((length = input.readInt()) > 0) {
                input.readFully(buffer, 0, length);
                fileOutput.write(buffer, 0, length);
            }
            fileOutput.close();
            final long calculatedChecksum = crc32.getValue();
            final long sentChecksum = input.readLong();
            if (calculatedChecksum != sentChecksum) {
                throw new NoSqlStoreException("Checksum didn't match during download of " + file.getName());
            }
            recover(file);
            final File renameTo = Util.logFile(logId);
            file.renameTo(renameTo);
        } while (true);
    } catch (final NoSqlStoreException e) {
        LOG.error("file server failure", e);
    } catch (final IOException e) {
        LOG.error("networking failure", e);
    } catch (final RuntimeException e) {
        LOG.error("request failure", e);
    } finally {
        try {
            connection.close();
        } catch (final IOException e) {
            LOG.warn("failure to close connection", e);
        }
    }
    // TODO restart
}
From source file: bobs.is.compress.sevenzip.SevenZOutputFile.java
/**
 * Finishes the addition of entries to this archive, without closing it.
 *
 * @throws IOException if archive is already closed.
 */
public void finish() throws IOException {
    if (finished) {
        throw new IOException("This archive has already been finished");
    }
    finished = true;
    final long headerPosition = file.getFilePointer();
    final ByteArrayOutputStream headerBaos = new ByteArrayOutputStream();
    final DataOutputStream header = new DataOutputStream(headerBaos);
    writeHeader(header);
    header.flush();
    final byte[] headerBytes = headerBaos.toByteArray();
    file.write(headerBytes);
    final CRC32 crc32 = new CRC32();
    // signature header
    file.seek(0);
    file.write(SevenZFile.sevenZSignature);
    // version
    file.write(0);
    file.write(2);
    // start header
    final ByteArrayOutputStream startHeaderBaos = new ByteArrayOutputStream();
    final DataOutputStream startHeaderStream = new DataOutputStream(startHeaderBaos);
    startHeaderStream.writeLong(Long.reverseBytes(headerPosition - SevenZFile.SIGNATURE_HEADER_SIZE));
    startHeaderStream.writeLong(Long.reverseBytes(0xffffFFFFL & headerBytes.length));
    crc32.reset();
    crc32.update(headerBytes);
    startHeaderStream.writeInt(Integer.reverseBytes((int) crc32.getValue()));
    startHeaderStream.flush();
    final byte[] startHeaderBytes = startHeaderBaos.toByteArray();
    crc32.reset();
    crc32.update(startHeaderBytes);
    file.writeInt(Integer.reverseBytes((int) crc32.getValue()));
    file.write(startHeaderBytes);
}
From source file: org.apache.isis.objectstore.nosql.db.file.server.FileServer.java
private void startSyncing() {
    final String syncHost = config.getString("fileserver.sync-host", DEFAULT_HOST);
    final int syncPort = config.getInt("fileserver.sync-port", DEFAULT_SYNC_PORT);
    final int connectionTimeout = config.getInt("fileserver.connection.timeout", 5000);
    LOG.info("preparing to sync to secondary server on " + syncHost + " port " + syncPort);
    final InetAddress address;
    try {
        address = InetAddress.getByName(syncHost);
    } catch (final UnknownHostException e) {
        LOG.error("Unknown host " + syncHost, e);
        System.exit(0);
        return;
    }
    while (awaitConnections) {
        Socket socket = null;
        try {
            socket = new Socket(address, syncPort);
            LOG.info("sync connected to " + socket.getInetAddress().getHostAddress() + " port "
                    + socket.getLocalPort());
            final CRC32 crc32 = new CRC32();
            final DataOutput output = new DataOutputStream(
                    new CheckedOutputStream(socket.getOutputStream(), crc32));
            final DataInput input = new DataInputStream(socket.getInputStream());
            output.writeByte(INIT);
            long logId = input.readLong();
            do {
                final long nextLogId = logId + 1;
                final File file = Util.logFile(nextLogId);
                if (file.exists() && server.getLogger().isWritten(nextLogId)) {
                    logId++;
                    output.writeByte(RECOVERY_LOG);
                    crc32.reset();
                    output.writeLong(logId);
                    LOG.info("sending recovery file: " + file.getName());
                    final BufferedInputStream fileInput = new BufferedInputStream(new FileInputStream(file));
                    final byte[] buffer = new byte[8092];
                    int read;
                    while ((read = fileInput.read(buffer)) > 0) {
                        output.writeInt(read);
                        output.write(buffer, 0, read);
                    }
                    output.writeInt(0);
                    output.writeLong(crc32.getValue());
                }
                try {
                    Thread.sleep(300);
                } catch (final InterruptedException ignore) {
                }
                while (isQuiescent) {
                    try {
                        Thread.sleep(300);
                    } catch (final InterruptedException ignore) {
                    }
                }
            } while (awaitConnections);
        } catch (final ConnectException e) {
            LOG.warn("not yet connected to secondary server at " + syncHost + " port " + syncPort);
            try {
                Thread.sleep(connectionTimeout);
            } catch (final InterruptedException ignore) {
            }
        } catch (final IOException e) {
            LOG.error("start failure - networking not set up for " + syncHost, e);
            try {
                Thread.sleep(300);
            } catch (final InterruptedException ignore) {
            }
        } catch (final RuntimeException e) {
            LOG.error("start failure", e);
            try {
                Thread.sleep(300);
            } catch (final InterruptedException ignore) {
            }
        }
    }
}
From source file: org.apache.hadoop.raid.Decoder.java
long fixErasedBlockImpl(FileSystem srcFs, Path srcFile, FileSystem parityFs, Path parityFile,
        boolean fixSource, long blockSize, long errorOffset, long limit, boolean partial, OutputStream out,
        Context context, CRC32 crc, StripeInfo si, boolean recoverFromStripeStore, Block lostBlock)
        throws IOException {
    Progressable reporter = context;
    if (reporter == null) {
        reporter = RaidUtils.NULL_PROGRESSABLE;
    }
    long startTime = System.currentTimeMillis();
    long decodingTime = 0;
    if (crc != null) {
        crc.reset();
    }
    int blockIdx = (int) (errorOffset / blockSize);
    LocationPair lp = null;
    int erasedLocationToFix;
    if (recoverFromStripeStore) {
        erasedLocationToFix = si.getBlockIdxInStripe(lostBlock);
    } else if (fixSource) {
        lp = StripeReader.getBlockLocation(codec, srcFs, srcFile, blockIdx, conf);
        erasedLocationToFix = codec.parityLength + lp.getBlockIdxInStripe();
    } else {
        lp = StripeReader.getParityBlockLocation(codec, blockIdx);
        erasedLocationToFix = lp.getBlockIdxInStripe();
    }
    FileStatus srcStat = srcFs.getFileStatus(srcFile);
    FileStatus parityStat = null;
    if (!recoverFromStripeStore) {
        parityStat = parityFs.getFileStatus(parityFile);
    }
    InputStream[] inputs = null;
    List<Integer> erasedLocations = new ArrayList<Integer>();
    // Start off with one erased location.
    erasedLocations.add(erasedLocationToFix);
    Set<Integer> locationsToNotRead = new HashSet<Integer>();
    int boundedBufferCapacity = 2;
    ParallelStreamReader parallelReader = null;
    LOG.info("Need to write " + limit + " bytes for erased location index " + erasedLocationToFix);
    long startOffsetInBlock = 0;
    if (partial) {
        startOffsetInBlock = errorOffset % blockSize;
    }
    try {
        int[] locationsToFix = new int[codec.parityLength];
        numReadBytes = 0;
        numReadBytesRemoteRack = 0;
        remoteRackFlag = new boolean[codec.parityLength + codec.stripeLength];
        for (int id = 0; id < codec.parityLength + codec.stripeLength; id++) {
            remoteRackFlag[id] = false;
        }
        boolean stripeVerified = (si == null);
        long written;
        // Loop while the number of written bytes is less than the max.
        for (written = 0; written < limit;) {
            try {
                if (parallelReader == null) {
                    long offsetInBlock = written + startOffsetInBlock;
                    if (recoverFromStripeStore) {
                        inputs = StripeReader.buildInputsFromStripeInfo((DistributedFileSystem) srcFs, srcStat,
                                codec, si, offsetInBlock, limit, erasedLocations, locationsToNotRead, code);
                    } else {
                        StripeReader sReader = StripeReader.getStripeReader(codec, conf, blockSize, srcFs,
                                lp.getStripeIdx(), srcStat);
                        inputs = sReader.buildInputs(srcFs, srcFile, srcStat, parityFs, parityFile, parityStat,
                                lp.getStripeIdx(), offsetInBlock, erasedLocations, locationsToNotRead, code);
                    }
                    int i = 0;
                    for (int location : locationsToNotRead) {
                        locationsToFix[i] = location;
                        i++;
                    }
                    assert (parallelReader == null);
                    parallelReader = new ParallelStreamReader(reporter, inputs, (int) Math.min(bufSize, limit),
                            parallelism, boundedBufferCapacity, Math.min(limit, blockSize));
                    parallelReader.start();
                }
                ParallelStreamReader.ReadResult readResult = readFromInputs(erasedLocations, limit, reporter,
                        parallelReader);
                stripeVerified = analysisStream(parallelReader, srcFs, parityFs, stripeVerified, si);
                // Calculate the number of bytes read from remote rack (through top of rack)
                for (int i = 0; i < codec.parityLength + codec.stripeLength; i++) {
                    if (remoteRackFlag[i]) {
                        numReadBytesRemoteRack += readResult.numRead[i];
                    }
                }
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Number of bytes read through the top of rack is " + numReadBytesRemoteRack);
                }
                long startDecoding = System.currentTimeMillis();
                int toWrite = (int) Math.min((long) bufSize, limit - written);
                doParallelDecoding(toWrite, readResult, parallelCode, locationsToFix);
                decodingTime += (System.currentTimeMillis() - startDecoding);
                // get the number of bytes read through hdfs.
                for (int readNum : readResult.numRead) {
                    numReadBytes += readNum;
                }
                for (int i = 0; i < locationsToFix.length; i++) {
                    if (locationsToFix[i] == erasedLocationToFix) {
                        if (out != null)
                            out.write(writeBufs[i], 0, toWrite);
                        if (crc != null) {
                            crc.update(writeBufs[i], 0, toWrite);
                        }
                        written += toWrite;
                        break;
                    }
                }
            } catch (IOException e) {
                LOG.warn("Exception in fixErasedBlockImpl: " + e, e);
                if (e instanceof TooManyErasedLocations) {
                    LogUtils.logRaidReconstructionMetrics(LOGRESULTS.FAILURE, 0, codec,
                            System.currentTimeMillis() - startTime, decodingTime, erasedLocations.size(),
                            numReadBytes, numReadBytesRemoteRack, (fixSource ? srcFile : parityFile),
                            errorOffset, LOGTYPES.OFFLINE_RECONSTRUCTION_TOO_MANY_CORRUPTIONS,
                            (fixSource ? srcFs : parityFs), e, context, -1);
                    throw e;
                } else if (e instanceof StripeMismatchException) {
                    LogUtils.logRaidReconstructionMetrics(LOGRESULTS.FAILURE, 0, codec,
                            System.currentTimeMillis() - startTime, erasedLocations.size(), -1, numReadBytes,
                            numReadBytesRemoteRack, (fixSource ? srcFile : parityFile), errorOffset,
                            LOGTYPES.OFFLINE_RECONSTRUCTION_STRIPE_VERIFICATION, (fixSource ? srcFs : parityFs),
                            e, context, -1);
                    throw e;
                }
                // Re-create inputs from the new erased locations.
                if (parallelReader != null) {
                    parallelReader.shutdown();
                    parallelReader = null;
                }
                if (inputs != null) {
                    RaidUtils.closeStreams(inputs);
                }
            }
        }
        LogUtils.logRaidReconstructionMetrics(LOGRESULTS.SUCCESS, written, codec,
                System.currentTimeMillis() - startTime, decodingTime, erasedLocations.size(), numReadBytes,
                numReadBytesRemoteRack, (fixSource ? srcFile : parityFile), errorOffset,
                LOGTYPES.OFFLINE_RECONSTRUCTION_BLOCK, (fixSource ? srcFs : parityFs), null, context, -1);
        return written;
    } finally {
        numMissingBlocksInStripe = erasedLocations.size();
        if (parallelReader != null) {
            parallelReader.shutdown();
        }
        if (inputs != null) {
            RaidUtils.closeStreams(inputs);
        }
        if (context != null) {
            context.getCounter(RaidCounter.FILE_FIX_NUM_READBYTES_REMOTERACK).increment(numReadBytesRemoteRack);
        }
    }
}