Example usage for java.util.zip CRC32 update

List of usage examples for java.util.zip CRC32 update

Introduction

On this page you can find usage examples for java.util.zip.CRC32.update(byte[] b, int off, int len).

Prototype

@Override
public void update(byte[] b, int off, int len) 

Document

Updates the CRC-32 checksum with the specified array of bytes.
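
Before the full examples below, here is a minimal, self-contained sketch of the call (the class name and sample input are illustrative only): it feeds a byte range into a CRC32 instance with update(byte[], int, int) and reads the checksum with getValue().

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class Crc32UpdateSketch {
    public static void main(String[] args) {
        byte[] data = "example input".getBytes(StandardCharsets.UTF_8);

        CRC32 crc = new CRC32();
        // Update the checksum with a sub-range of the array: offset 0, length data.length.
        crc.update(data, 0, data.length);
        System.out.println("CRC-32: " + Long.toHexString(crc.getValue()));

        // reset() clears the running checksum so the same instance can be reused,
        // as the fixErasedBlockImpl examples below do before each reconstruction.
        crc.reset();
    }
}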

Usage

From source file:io.hops.erasure_coding.XORDecoder.java

@Override
protected long fixErasedBlockImpl(FileSystem fs, Path srcFile, FileSystem parityFs, Path parityFile,
        boolean fixSource, long blockSize, long errorOffset, long limit, boolean partial, OutputStream out,
        Progressable reporter, CRC32 crc) throws IOException {
    if (partial) {
        throw new IOException("We don't support partial reconstruction");
    }
    LOG.info("Fixing block at " + srcFile + ":" + errorOffset + ", limit " + limit);
    if (crc != null) {
        crc.reset();
    }
    FileStatus srcStat = fs.getFileStatus(srcFile);
    FSDataInputStream[] inputs = new FSDataInputStream[stripeSize + this.codec.parityLength];

    try {
        long errorBlockOffset = (errorOffset / blockSize) * blockSize;
        long[] srcOffsets = stripeOffsets(errorOffset, blockSize, fixSource);
        for (int i = 0; i < srcOffsets.length; i++) {
            if (fixSource && srcOffsets[i] == errorBlockOffset) {
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
                LOG.info("Using zeros at " + srcFile + ":" + errorBlockOffset);
                continue;
            }
            if (srcOffsets[i] < srcStat.getLen()) {
                FSDataInputStream in = fs.open(srcFile);
                in.seek(srcOffsets[i]);
                inputs[i] = in;
            } else {
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
                LOG.info("Using zeros at " + srcFile + ":" + errorBlockOffset);
            }
        }

        if (fixSource) {
            FSDataInputStream parityFileIn = parityFs.open(parityFile);
            parityFileIn.seek(parityOffset(errorOffset, blockSize));
            inputs[inputs.length - 1] = parityFileIn;
        } else {
            inputs[inputs.length - 1] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
            LOG.info("Using zeros at " + parityFile + ":" + errorBlockOffset);
        }
    } catch (IOException e) {
        RaidUtils.closeStreams(inputs);
        throw e;
    }

    int boundedBufferCapacity = 1;
    ParallelStreamReader parallelReader = new ParallelStreamReader(reporter, inputs, bufSize, parallelism,
            boundedBufferCapacity, blockSize);
    parallelReader.start();
    try {
        // Loop while the number of skipped + written bytes is less than the max.
        long written;
        for (written = 0; written < limit;) {
            ParallelStreamReader.ReadResult readResult;
            try {
                readResult = parallelReader.getReadResult();
            } catch (InterruptedException e) {
                throw new IOException("Interrupted while waiting for read result");
            }
            // Cannot tolerate any IO errors.
            IOException readEx = readResult.getException();
            if (readEx != null) {
                throw readEx;
            }

            int toWrite = (int) Math.min((long) bufSize, limit - written);

            XOREncoder.xor(readResult.readBufs, writeBufs[0]);

            out.write(writeBufs[0], 0, toWrite);
            if (crc != null) {
                crc.update(writeBufs[0], 0, toWrite);
            }
            written += toWrite;
        }
        return written;
    } finally {
        // Inputs will be closed by parallelReader.shutdown().
        parallelReader.shutdown();
    }
}

From source file:org.apache.hadoop.raid.XORDecoder.java

@Override
protected long fixErasedBlockImpl(FileSystem fs, Path srcFile, FileSystem parityFs, Path parityFile,
        boolean fixSource, long blockSize, long errorOffset, long limit, boolean partial, OutputStream out,
        Context context, CRC32 crc, StripeInfo si, boolean recoverFromStripeStore, Block lostBlock)
        throws IOException {

    Progressable reporter = context;
    if (reporter == null) {
        reporter = RaidUtils.NULL_PROGRESSABLE;
    }

    if (partial) {
        throw new IOException("We don't support partial reconstruction");
    }
    LOG.info("Fixing block at " + srcFile + ":" + errorOffset + ", limit " + limit);
    if (crc != null) {
        crc.reset();
    }
    FileStatus srcStat = fs.getFileStatus(srcFile);
    FSDataInputStream[] inputs = new FSDataInputStream[stripeSize + this.codec.parityLength];

    try {
        long errorBlockOffset = (errorOffset / blockSize) * blockSize;
        long[] srcOffsets = stripeOffsets(errorOffset, blockSize, fixSource);
        for (int i = 0; i < srcOffsets.length; i++) {
            if (fixSource && srcOffsets[i] == errorBlockOffset) {
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
                LOG.info("Using zeros at " + srcFile + ":" + errorBlockOffset);
                continue;
            }
            if (srcOffsets[i] < srcStat.getLen()) {
                FSDataInputStream in = fs.open(srcFile);
                in.seek(srcOffsets[i]);
                inputs[i] = in;
            } else {
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
                LOG.info("Using zeros at " + srcFile + ":" + errorBlockOffset);
            }
        }

        if (fixSource) {
            FSDataInputStream parityFileIn = parityFs.open(parityFile);
            parityFileIn.seek(parityOffset(errorOffset, blockSize));
            inputs[inputs.length - 1] = parityFileIn;
        } else {
            inputs[inputs.length - 1] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
            LOG.info("Using zeros at " + parityFile + ":" + errorBlockOffset);
        }
    } catch (IOException e) {
        RaidUtils.closeStreams(inputs);
        throw e;
    }

    int boundedBufferCapacity = 1;
    ParallelStreamReader parallelReader = new ParallelStreamReader(reporter, inputs, bufSize, parallelism,
            boundedBufferCapacity, blockSize);
    parallelReader.start();
    try {
        // Loop while the number of skipped + written bytes is less than the max.
        long written;
        for (written = 0; written < limit;) {
            ParallelStreamReader.ReadResult readResult;
            try {
                readResult = parallelReader.getReadResult();
            } catch (InterruptedException e) {
                throw new IOException("Interrupted while waiting for read result");
            }
            // Cannot tolerate any IO errors.
            IOException readEx = readResult.getException();
            if (readEx != null) {
                throw readEx;
            }

            int toWrite = (int) Math.min((long) bufSize, limit - written);

            XOREncoder.xor(readResult.readBufs, writeBufs[0]);

            out.write(writeBufs[0], 0, toWrite);
            if (crc != null) {
                crc.update(writeBufs[0], 0, toWrite);
            }
            written += toWrite;
        }
        return written;
    } finally {
        // Inputs will be closed by parallelReader.shutdown().
        parallelReader.shutdown();
    }
}

From source file:org.getlantern.firetweet.util.Utils.java

public static boolean hasAccountSignedWithOfficialKeys(final Context context) {
    if (context == null)
        return false;
    final Cursor cur = ContentResolverUtils.query(context.getContentResolver(), Accounts.CONTENT_URI,
            Accounts.COLUMNS, null, null, null);
    if (cur == null)
        return false;
    final String[] keySecrets = context.getResources()
            .getStringArray(R.array.values_official_consumer_secret_crc32);
    final ParcelableAccount.Indices indices = new ParcelableAccount.Indices(cur);
    cur.moveToFirst();
    final CRC32 crc32 = new CRC32();
    try {
        while (!cur.isAfterLast()) {
            final String consumerSecret = cur.getString(indices.consumer_secret);
            if (consumerSecret != null) {
                final byte[] consumerSecretBytes = consumerSecret.getBytes(Charset.forName("UTF-8"));
                crc32.update(consumerSecretBytes, 0, consumerSecretBytes.length);
                final long value = crc32.getValue();
                crc32.reset();
                for (final String keySecret : keySecrets) {
                    if (Long.parseLong(keySecret, 16) == value)
                        return true;
                }
            }
            cur.moveToNext();
        }
    } finally {
        cur.close();
    }
    return false;
}

From source file:org.apache.hadoop.raid.Decoder.java

long fixErasedBlockImpl(FileSystem srcFs, Path srcFile, FileSystem parityFs, Path parityFile, boolean fixSource,
        long blockSize, long errorOffset, long limit, boolean partial, OutputStream out, Context context,
        CRC32 crc, StripeInfo si, boolean recoverFromStripeStore, Block lostBlock) throws IOException {

    Progressable reporter = context;

    if (reporter == null) {
        reporter = RaidUtils.NULL_PROGRESSABLE;
    }

    long startTime = System.currentTimeMillis();
    long decodingTime = 0;
    if (crc != null) {
        crc.reset();
    }
    int blockIdx = (int) (errorOffset / blockSize);
    LocationPair lp = null;
    int erasedLocationToFix;

    if (recoverFromStripeStore) {
        erasedLocationToFix = si.getBlockIdxInStripe(lostBlock);
    } else if (fixSource) {
        lp = StripeReader.getBlockLocation(codec, srcFs, srcFile, blockIdx, conf);
        erasedLocationToFix = codec.parityLength + lp.getBlockIdxInStripe();
    } else {
        lp = StripeReader.getParityBlockLocation(codec, blockIdx);
        erasedLocationToFix = lp.getBlockIdxInStripe();
    }

    FileStatus srcStat = srcFs.getFileStatus(srcFile);
    FileStatus parityStat = null;
    if (!recoverFromStripeStore) {
        parityStat = parityFs.getFileStatus(parityFile);
    }

    InputStream[] inputs = null;
    List<Integer> erasedLocations = new ArrayList<Integer>();
    // Start off with one erased location.
    erasedLocations.add(erasedLocationToFix);
    Set<Integer> locationsToNotRead = new HashSet<Integer>();

    int boundedBufferCapacity = 2;
    ParallelStreamReader parallelReader = null;
    LOG.info("Need to write " + limit + " bytes for erased location index " + erasedLocationToFix);

    long startOffsetInBlock = 0;
    if (partial) {
        startOffsetInBlock = errorOffset % blockSize;
    }

    try {
        int[] locationsToFix = new int[codec.parityLength];
        numReadBytes = 0;
        numReadBytesRemoteRack = 0;
        remoteRackFlag = new boolean[codec.parityLength + codec.stripeLength];
        for (int id = 0; id < codec.parityLength + codec.stripeLength; id++) {
            remoteRackFlag[id] = false;
        }
        boolean stripeVerified = (si == null);
        long written;
        // Loop while the number of written bytes is less than the max.
        for (written = 0; written < limit;) {
            try {
                if (parallelReader == null) {
                    long offsetInBlock = written + startOffsetInBlock;
                    if (recoverFromStripeStore) {
                        inputs = StripeReader.buildInputsFromStripeInfo((DistributedFileSystem) srcFs, srcStat,
                                codec, si, offsetInBlock, limit, erasedLocations, locationsToNotRead, code);
                    } else {
                        StripeReader sReader = StripeReader.getStripeReader(codec, conf, blockSize, srcFs,
                                lp.getStripeIdx(), srcStat);
                        inputs = sReader.buildInputs(srcFs, srcFile, srcStat, parityFs, parityFile, parityStat,
                                lp.getStripeIdx(), offsetInBlock, erasedLocations, locationsToNotRead, code);
                    }
                    int i = 0;
                    for (int location : locationsToNotRead) {
                        locationsToFix[i] = location;
                        i++;
                    }

                    assert (parallelReader == null);
                    parallelReader = new ParallelStreamReader(reporter, inputs, (int) Math.min(bufSize, limit),
                            parallelism, boundedBufferCapacity, Math.min(limit, blockSize));
                    parallelReader.start();
                }
                ParallelStreamReader.ReadResult readResult = readFromInputs(erasedLocations, limit, reporter,
                        parallelReader);

                stripeVerified = analysisStream(parallelReader, srcFs, parityFs, stripeVerified, si);

                //Calculate the number of bytes read from remote rack (through top of rack)
                for (int i = 0; i < codec.parityLength + codec.stripeLength; i++) {
                    if (remoteRackFlag[i]) {
                        numReadBytesRemoteRack += readResult.numRead[i];
                    }
                }

                if (LOG.isDebugEnabled()) {
                    LOG.debug("Number of bytes read through the top of rack is " + numReadBytesRemoteRack);
                }

                long startDecoding = System.currentTimeMillis();
                int toWrite = (int) Math.min((long) bufSize, limit - written);
                doParallelDecoding(toWrite, readResult, parallelCode, locationsToFix);
                decodingTime += (System.currentTimeMillis() - startDecoding);

                // get the number of bytes read through hdfs.
                for (int readNum : readResult.numRead) {
                    numReadBytes += readNum;
                }

                for (int i = 0; i < locationsToFix.length; i++) {
                    if (locationsToFix[i] == erasedLocationToFix) {
                        if (out != null)
                            out.write(writeBufs[i], 0, toWrite);
                        if (crc != null) {
                            crc.update(writeBufs[i], 0, toWrite);
                        }
                        written += toWrite;
                        break;
                    }
                }
            } catch (IOException e) {
                LOG.warn("Exception in fixErasedBlockImpl: " + e, e);
                if (e instanceof TooManyErasedLocations) {
                    LogUtils.logRaidReconstructionMetrics(LOGRESULTS.FAILURE, 0, codec,
                            System.currentTimeMillis() - startTime, decodingTime, erasedLocations.size(),
                            numReadBytes, numReadBytesRemoteRack, (fixSource ? srcFile : parityFile),
                            errorOffset, LOGTYPES.OFFLINE_RECONSTRUCTION_TOO_MANY_CORRUPTIONS,
                            (fixSource ? srcFs : parityFs), e, context, -1);
                    throw e;
                } else if (e instanceof StripeMismatchException) {
                    LogUtils.logRaidReconstructionMetrics(LOGRESULTS.FAILURE, 0, codec,
                            System.currentTimeMillis() - startTime, erasedLocations.size(), -1, numReadBytes,
                            numReadBytesRemoteRack, (fixSource ? srcFile : parityFile), errorOffset,
                            LOGTYPES.OFFLINE_RECONSTRUCTION_STRIPE_VERIFICATION, (fixSource ? srcFs : parityFs),
                            e, context, -1);
                    throw e;
                }
                // Re-create inputs from the new erased locations.
                if (parallelReader != null) {
                    parallelReader.shutdown();
                    parallelReader = null;
                }
                if (inputs != null) {
                    RaidUtils.closeStreams(inputs);
                }
            }
        }

        LogUtils.logRaidReconstructionMetrics(LOGRESULTS.SUCCESS, written, codec,
                System.currentTimeMillis() - startTime, decodingTime, erasedLocations.size(), numReadBytes,
                numReadBytesRemoteRack, (fixSource ? srcFile : parityFile), errorOffset,
                LOGTYPES.OFFLINE_RECONSTRUCTION_BLOCK, (fixSource ? srcFs : parityFs), null, context, -1);
        return written;
    } finally {
        numMissingBlocksInStripe = erasedLocations.size();
        if (parallelReader != null) {
            parallelReader.shutdown();
        }
        if (inputs != null) {
            RaidUtils.closeStreams(inputs);
        }
        if (context != null) {
            context.getCounter(RaidCounter.FILE_FIX_NUM_READBYTES_REMOTERACK).increment(numReadBytesRemoteRack);
        }
    }
}

From source file:io.hops.erasure_coding.Decoder.java

long fixErasedBlockImpl(FileSystem srcFs, Path srcFile, FileSystem parityFs, Path parityFile, boolean fixSource,
        long blockSize, long errorOffset, long limit, boolean partial, OutputStream out, Progressable reporter,
        CRC32 crc) throws IOException {
    long startTime = System.currentTimeMillis();
    if (crc != null) {
        crc.reset();
    }
    int blockIdx = (int) (errorOffset / blockSize);
    LocationPair lp = null;
    int erasedLocationToFix;
    if (fixSource) {
        lp = StripeReader.getBlockLocation(codec, blockIdx);
        erasedLocationToFix = codec.parityLength + lp.getBlockIdxInStripe();
    } else {
        lp = StripeReader.getParityBlockLocation(codec, blockIdx);
        erasedLocationToFix = lp.getBlockIdxInStripe();
    }

    FileStatus srcStat = srcFs.getFileStatus(srcFile);
    FileStatus parityStat = parityFs.getFileStatus(parityFile);

    InputStream[] inputs = null;
    List<Integer> erasedLocations = new ArrayList<Integer>();
    // Start off with one erased location.
    erasedLocations.add(erasedLocationToFix);
    List<Integer> locationsToRead = new ArrayList<Integer>(codec.parityLength + codec.stripeLength);

    int boundedBufferCapacity = 2;
    ParallelStreamReader parallelReader = null;
    LOG.info("Need to write " + limit + " bytes for erased location index " + erasedLocationToFix);

    long startOffsetInBlock = 0;
    if (partial) {
        startOffsetInBlock = errorOffset % blockSize;
    }

    // will be resized later
    int[] erasedLocationsArray = new int[0];
    int[] locationsToReadArray = new int[0];
    int[] locationsNotToReadArray = new int[0];

    try {
        numReadBytes = 0;
        long written;
        // Loop while the number of written bytes is less than the max.
        for (written = 0; written < limit;) {
            try {
                if (parallelReader == null) {
                    long offsetInBlock = written + startOffsetInBlock;
                    StripeReader sReader = StripeReader.getStripeReader(codec, conf, blockSize, srcFs,
                            lp.getStripeIdx(), srcStat);
                    inputs = sReader.buildInputs(srcFs, srcFile, srcStat, parityFs, parityFile, parityStat,
                            lp.getStripeIdx(), offsetInBlock, erasedLocations, locationsToRead, code);

                    /*
                     * locationsToRead have now been populated and erasedLocations
                     * might have been updated with more erased locations.
                     */
                    LOG.info("Erased locations: " + erasedLocations.toString()
                            + "\nLocations to Read for repair:" + locationsToRead.toString());

                    /*
                     * Initialize erasedLocationsArray with erasedLocations.
                     */
                    int i = 0;
                    erasedLocationsArray = new int[erasedLocations.size()];
                    for (int loc = 0; loc < codec.stripeLength + codec.parityLength; loc++) {
                        if (erasedLocations.indexOf(loc) >= 0) {
                            erasedLocationsArray[i] = loc;
                            i++;
                        }
                    }

                    /*
                     * Initialize locationsToReadArray with locationsToRead.
                     */
                    i = 0;
                    locationsToReadArray = new int[locationsToRead.size()];
                    for (int loc = 0; loc < codec.stripeLength + codec.parityLength; loc++) {
                        if (locationsToRead.indexOf(loc) >= 0) {
                            locationsToReadArray[i] = loc;
                            i++;
                        }
                    }

                    i = 0;
                    locationsNotToReadArray = new int[codec.stripeLength + codec.parityLength
                            - locationsToRead.size()];

                    for (int loc = 0; loc < codec.stripeLength + codec.parityLength; loc++) {
                        if (locationsToRead.indexOf(loc) == -1 || erasedLocations.indexOf(loc) != -1) {
                            locationsNotToReadArray[i] = loc;
                            i++;
                        }
                    }

                    this.writeBufs = new byte[erasedLocations.size()][];
                    allocateBuffers();

                    assert (parallelReader == null);
                    parallelReader = new ParallelStreamReader(reporter, inputs, (int) Math.min(bufSize, limit),
                            parallelism, boundedBufferCapacity, Math.min(limit, blockSize));
                    parallelReader.start();
                }
                ParallelStreamReader.ReadResult readResult = readFromInputs(erasedLocations, limit, reporter,
                        parallelReader);

                code.decodeBulk(readResult.readBufs, writeBufs, erasedLocationsArray, locationsToReadArray,
                        locationsNotToReadArray);

                // get the number of bytes read through hdfs.
                for (int readNum : readResult.numRead) {
                    numReadBytes += readNum;
                }

                int toWrite = (int) Math.min((long) bufSize, limit - written);
                for (int i = 0; i < erasedLocationsArray.length; i++) {
                    if (erasedLocationsArray[i] == erasedLocationToFix) {
                        if (out != null) {
                            out.write(writeBufs[i], 0, toWrite);
                        }
                        if (crc != null) {
                            crc.update(writeBufs[i], 0, toWrite);
                        }
                        written += toWrite;
                        break;
                    }
                }
            } catch (IOException e) {
                if (e instanceof TooManyErasedLocations) {
                    logRaidReconstructionMetrics("FAILURE", 0, codec, System.currentTimeMillis() - startTime,
                            erasedLocations.size(), numReadBytes, srcFile, errorOffset,
                            LOGTYPES.OFFLINE_RECONSTRUCTION, srcFs);
                    throw e;
                }
                // Re-create inputs from the new erased locations.
                if (parallelReader != null) {
                    parallelReader.shutdown();
                    parallelReader = null;
                }
                RaidUtils.closeStreams(inputs);
            }
        }
        logRaidReconstructionMetrics("SUCCESS", written, codec, System.currentTimeMillis() - startTime,
                erasedLocations.size(), numReadBytes, srcFile, errorOffset, LOGTYPES.OFFLINE_RECONSTRUCTION,
                srcFs);
        return written;
    } finally {
        numMissingBlocksInStripe = erasedLocations.size();
        if (parallelReader != null) {
            parallelReader.shutdown();
        }
        RaidUtils.closeStreams(inputs);
    }
}