List of usage examples for java.util.zip.Checksum.reset()
public void reset();
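Before the project examples below, a minimal self-contained sketch of the pattern they all share: a single CRC32 instance is reused across many inputs, with reset() clearing the accumulated state between them. The input strings here are arbitrary illustration values.

import java.util.zip.CRC32;
import java.util.zip.Checksum;

public class ChecksumResetDemo {
    public static void main(String[] args) {
        Checksum checksum = new CRC32();
        for (String s : new String[] { "first input", "second input" }) {
            byte[] bytes = s.getBytes();
            checksum.reset();                        // clear state left over from the previous input
            checksum.update(bytes, 0, bytes.length); // accumulate this input only
            System.out.println(s + " -> " + checksum.getValue());
        }
    }
}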
From source file: com.cisco.dvbu.ps.deploytool.services.RegressionManagerUtils.java
/**
 * Append the checksum value for the entire query to the end of the resource URL.
 * Eliminate any double quote ("\"") characters from the URL.
 *
 * Examples:
 *   incoming from clause        outgoing result
 *   -----------------------     ----------------
 *   CAT1.SCH1.ViewSales     --> CAT1.SCH1.ViewSales_1717783081
 *
 * @param query
 * @param resourceURL
 * @return resourceURL
 */
public static String appendUrlChecksum(String query, String resourceURL) {
    /* 2015-07-06 mtinius - Adding a checksum to the URL allows for unique identification of queries that invoke the same table.
     * 2015-10-13 mtinius - Moved this code to a separate method from getTableUrl() as it was interfering with the FUNCTIONAL test.
     */
    // Calculate the CRC for the string to produce a unique identifier.
    Checksum checksum = new CRC32();
    long currentLineCheckSumValue = 0L;

    // Make sure there are no line feeds, carriage returns or double spaces in the query.
    String queryTmp = query.replace("\n", " ").replaceAll("\r", " ").trim().replaceAll("  ", " ");
    byte[] bytes = queryTmp.getBytes();
    checksum.reset();
    checksum.update(bytes, 0, bytes.length);
    currentLineCheckSumValue = checksum.getValue();

    // Rewrite the resource URL to include the query checksum value and make sure there are
    // no double quote ("\"") characters present.
    resourceURL = resourceURL.replaceAll("\"", "") + "_" + currentLineCheckSumValue;

    return resourceURL;
}
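A hypothetical call to the method above (the query text and quoted URL are invented for illustration; the numeric suffix is whatever CRC32 yields for the normalized query, as in the javadoc example):

String query = "SELECT * FROM CAT1.SCH1.ViewSales";
String url = RegressionManagerUtils.appendUrlChecksum(query, "\"CAT1.SCH1.ViewSales\"");
// Double quotes are stripped and the query checksum appended,
// e.g. CAT1.SCH1.ViewSales_1717783081
System.out.println(url);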
From source file: com.cisco.dvbu.ps.common.util.CommonUtils.java
/**
 * Returns a sum of CRC32 checksums of all lines/rows in a file.
 *
 * This method is used to compare files with the same lines/rows, which may be in a different
 * order, in which case we still want to consider them equal (from the point of view of
 * containing the same data). In such a case this method will return the same result.
 *
 * This is useful when the file contains results of a database query and we need to compare
 * results of two queries that may return the same data but in a different order.
 *
 * @author SST
 * @param filePath file name with full path
 * @return sum of checksums of each line (row) from the input file.
 *         A long would suffice for files up to several GB in size;
 *         BigInteger was chosen in case even bigger files are used.
 * @throws IOException
 */
public static BigInteger fileChecksumByRow(String filePath) throws IOException {
    BigInteger sumOfCheckSumValues = new BigInteger("0");
    long currentLineCheckSumValue = 0L;
    Checksum checksum = new CRC32();

    // try-with-resources ensures the reader is closed even if an IOException is thrown.
    try (BufferedReader br = new BufferedReader(new FileReader(filePath))) {
        String line;
        while ((line = br.readLine()) != null) { // read one line at a time
            byte[] bytes = line.getBytes();
            checksum.reset();                    // start a fresh CRC32 for this line
            checksum.update(bytes, 0, bytes.length);
            currentLineCheckSumValue = checksum.getValue();
            sumOfCheckSumValues = sumOfCheckSumValues.add(BigInteger.valueOf(currentLineCheckSumValue));
        }
    }
    return sumOfCheckSumValues;
}
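Because the per-row checksums are combined by addition, which is commutative, two files holding the same rows in a different order produce the same sum. A quick usage sketch (the file paths and contents are placeholders):

// /tmp/a.txt contains: row1\nrow2      /tmp/b.txt contains: row2\nrow1
BigInteger a = CommonUtils.fileChecksumByRow("/tmp/a.txt");
BigInteger b = CommonUtils.fileChecksumByRow("/tmp/b.txt");
System.out.println(a.equals(b)); // true: same rows, different order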
From source file: PngEncoder.java
/**
 * Writes the IDAT (image data) chunks to the output stream.
 *
 * @param out the OutputStream to write the chunk to
 * @param csum the Checksum that is updated as data is written
 *             to the passed-in OutputStream
 * @throws IOException if a problem is encountered writing the output
 */
private void writeIdatChunks(OutputStream out, Checksum csum) throws IOException {
    int rowWidth = width * outputBpp; // size of image data in a row, in bytes
    int row = 0;
    Deflater deflater = new Deflater(compressionLevel);
    ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
    DeflaterOutputStream defOut = new DeflaterOutputStream(byteOut, deflater);
    byte[] filteredPixelQueue = new byte[rowWidth];

    // Output pixel queues
    byte[][] outputPixelQueue = new byte[2][rowWidth];
    Arrays.fill(outputPixelQueue[1], (byte) 0);
    int outputPixelQueueRow = 0;
    int outputPixelQueuePrevRow = 1;

    while (row < height) {
        if (filter == null) {
            defOut.write(0);
            translator.translate(outputPixelQueue[outputPixelQueueRow], row);
            defOut.write(outputPixelQueue[outputPixelQueueRow], 0, rowWidth);
        } else {
            defOut.write(filter.getType());
            translator.translate(outputPixelQueue[outputPixelQueueRow], row);
            filter.filter(filteredPixelQueue, outputPixelQueue[outputPixelQueueRow],
                    outputPixelQueue[outputPixelQueuePrevRow], outputBpp);
            defOut.write(filteredPixelQueue, 0, rowWidth);
        }
        ++row;
        outputPixelQueueRow = row & 1;
        outputPixelQueuePrevRow = outputPixelQueueRow ^ 1;
    }
    defOut.finish();
    byteOut.close();

    writeInt(out, byteOut.size());
    csum.reset();                         // the chunk CRC covers only the chunk type and data, not the length
    out.write(IDAT);
    byteOut.writeTo(out);
    writeInt(out, (int) csum.getValue()); // write the CRC accumulated while the chunk was streamed
}
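This method only makes sense if csum observes the bytes written to out; in this encoder that wiring presumably happens at the call site. A minimal sketch of that pattern, assuming a CheckedOutputStream is used (the class and file name here are illustrative, not PngEncoder's actual fields):

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.CRC32;
import java.util.zip.CheckedOutputStream;
import java.util.zip.Checksum;

public class ChecksumStreamWiring {
    public static void main(String[] args) throws IOException {
        Checksum csum = new CRC32();
        // Every byte written to 'out' also updates 'csum', so a caller can
        // csum.reset() before a chunk and read csum.getValue() after the
        // chunk type and data have been written.
        try (OutputStream out = new CheckedOutputStream(new FileOutputStream("image.png"), csum)) {
            csum.reset();
            out.write(new byte[] { 'I', 'D', 'A', 'T' }); // chunk type bytes (illustrative)
            System.out.println("CRC so far: " + csum.getValue());
        }
    }
}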
From source file: org.apache.nifi.processors.standard.TailFile.java
private void processTailFile(final ProcessContext context, final ProcessSession session, final String tailFile) {
    // If the user changes the file that is being tailed, we need to consume the already-rolled-over data
    // according to the Initial Start Position property.
    boolean rolloverOccurred;
    TailFileObject tfo = states.get(tailFile);

    if (tfo.isTailFileChanged()) {
        rolloverOccurred = false;
        final String recoverPosition = context.getProperty(START_POSITION).getValue();

        if (START_BEGINNING_OF_TIME.getValue().equals(recoverPosition)) {
            recoverRolledFiles(context, session, tailFile, tfo.getExpectedRecoveryChecksum(),
                    tfo.getState().getTimestamp(), tfo.getState().getPosition());
        } else if (START_CURRENT_FILE.getValue().equals(recoverPosition)) {
            cleanup();
            tfo.setState(new TailFileState(tailFile, null, null, 0L, 0L, 0L, null, tfo.getState().getBuffer()));
        } else {
            final String filename = tailFile;
            final File file = new File(filename);

            try {
                final FileChannel fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
                getLogger().debug("Created FileChannel {} for {}", new Object[] { fileChannel, file });

                final Checksum checksum = new CRC32();
                final long position = file.length();
                final long timestamp = file.lastModified();

                try (final InputStream fis = new FileInputStream(file);
                        final CheckedInputStream in = new CheckedInputStream(fis, checksum)) {
                    StreamUtils.copy(in, new NullOutputStream(), position);
                }

                fileChannel.position(position);
                cleanup();
                tfo.setState(new TailFileState(filename, file, fileChannel, position, timestamp,
                        file.length(), checksum, tfo.getState().getBuffer()));
            } catch (final IOException ioe) {
                getLogger().error(
                        "Attempted to position Reader at current position in file {} but failed to do so due to {}",
                        new Object[] { file, ioe.toString() }, ioe);
                context.yield();
                return;
            }
        }

        tfo.setTailFileChanged(false);
    } else {
        // Recover any data that may have rolled over since the last time that this processor ran.
        // If expectedRecoveryChecksum != null, that indicates that this is the first iteration since the
        // processor was started, so use whatever checksum value was present when the state was last persisted.
        // In this case, we must then null out the value so that the next iteration won't keep using the
        // "recovered" value. If the value is null, then we know that either the processor has already recovered
        // that data, or there was no state persisted. In either case, use whatever checksum value is currently
        // in the state.
        Long expectedChecksumValue = tfo.getExpectedRecoveryChecksum();
        if (expectedChecksumValue == null) {
            expectedChecksumValue = tfo.getState().getChecksum() == null ? null
                    : tfo.getState().getChecksum().getValue();
        }

        rolloverOccurred = recoverRolledFiles(context, session, tailFile, expectedChecksumValue,
                tfo.getState().getTimestamp(), tfo.getState().getPosition());
        tfo.setExpectedRecoveryChecksum(null);
    }

    // Initialize local variables from the state object; this is done so that we can easily change the values
    // throughout the onTrigger method and then create a new state object after we finish processing the files.
    TailFileState state = tfo.getState();
    File file = state.getFile();
    FileChannel reader = state.getReader();
    Checksum checksum = state.getChecksum();
    if (checksum == null) {
        checksum = new CRC32();
    }
    long position = state.getPosition();
    long timestamp = state.getTimestamp();
    long length = state.getLength();

    // Create a reader if necessary.
    if (file == null || reader == null) {
        file = new File(tailFile);
        reader = createReader(file, position);
        if (reader == null) {
            context.yield();
            return;
        }
    }

    final long startNanos = System.nanoTime();

    // Check if the file has rotated.
    if (rolloverOccurred
            || (timestamp <= file.lastModified() && length > file.length())
            || (timestamp < file.lastModified() && length >= file.length())) {

        // Since the file has rotated, we close the reader, create a new one, and then reset our state.
        try {
            reader.close();
            getLogger().debug("Closed FileChannel {}", new Object[] { reader });
        } catch (final IOException ioe) {
            getLogger().warn("Failed to close reader for {} due to {}", new Object[] { file, ioe });
        }

        reader = createReader(file, 0L);
        position = 0L;
        checksum.reset();
    }

    if (file.length() == position || !file.exists()) {
        // No data to consume, so rather than continually running, yield to allow other processors to use the thread.
        getLogger().debug("No data to consume; created no FlowFiles");
        tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
                state.getBuffer()));
        persistState(tfo, context);
        context.yield();
        return;
    }

    // If there is data to consume, read as much as we can.
    final TailFileState currentState = state;
    final Checksum chksum = checksum;

    // Data has been written to the file. Stream it to a new FlowFile.
    FlowFile flowFile = session.create();

    final FileChannel fileReader = reader;
    final AtomicLong positionHolder = new AtomicLong(position);
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream rawOut) throws IOException {
            try (final OutputStream out = new BufferedOutputStream(rawOut)) {
                positionHolder.set(readLines(fileReader, currentState.getBuffer(), out, chksum));
            }
        }
    });

    // If there ended up being no data, just remove the FlowFile.
    if (flowFile.getSize() == 0) {
        session.remove(flowFile);
        getLogger().debug("No data to consume; removed created FlowFile");
    } else {
        // Determine the filename for the FlowFile by using
        // <base filename of log file>.<initial offset>-<final offset>.<extension>
        final String tailFilename = file.getName();
        final String baseName = StringUtils.substringBeforeLast(tailFilename, ".");
        final String flowFileName;
        if (baseName.length() < tailFilename.length()) {
            flowFileName = baseName + "." + position + "-" + positionHolder.get() + "."
                    + StringUtils.substringAfterLast(tailFilename, ".");
        } else {
            flowFileName = baseName + "." + position + "-" + positionHolder.get();
        }

        final Map<String, String> attributes = new HashMap<>(3);
        attributes.put(CoreAttributes.FILENAME.key(), flowFileName);
        attributes.put(CoreAttributes.MIME_TYPE.key(), "text/plain");
        attributes.put("tailfile.original.path", tailFile);
        flowFile = session.putAllAttributes(flowFile, attributes);

        session.getProvenanceReporter().receive(flowFile, file.toURI().toString(),
                "FlowFile contains bytes " + position + " through " + positionHolder.get() + " of source file",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        session.transfer(flowFile, REL_SUCCESS);
        position = positionHolder.get();

        // Set the timestamp to the latest of when the file was modified and the current timestamp stored in
        // the state. We do this because when we read a file that has been rolled over, we set the state to
        // 1 millisecond later than the last mod date in order to avoid ingesting that file again. If we then
        // read from this file during the same second (or millisecond, depending on the operating system's file
        // last mod precision), then we could set the timestamp to a smaller value, which could result in
        // reading in the rotated file a second time.
        timestamp = Math.max(state.getTimestamp(), file.lastModified());
        length = file.length();
        getLogger().debug("Created {} and routed to success", new Object[] { flowFile });
    }

    // Create a new state object to represent our current position, timestamp, etc.
    tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
            state.getBuffer()));

    // We must commit the session before persisting state in order to avoid data loss on restart.
    session.commit();
    persistState(tfo, context);
}
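The checksum.reset() after a rotation matters because the same CRC32 instance then starts accumulating the new file from byte 0; a stale value would make the persisted state disagree with what recoverRolledFiles() later compares against. A standalone sketch of that compare-by-checksum idea, assuming invented file names and a hypothetical helper (this is not NiFi's API):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.CRC32;
import java.util.zip.CheckedInputStream;
import java.util.zip.Checksum;

public class RolloverCheck {
    // Checksum the first 'length' bytes of a file; used to test whether a
    // rolled-over file is the same file we were tailing before it was renamed.
    static long checksumPrefix(String path, long length) throws IOException {
        Checksum checksum = new CRC32();
        checksum.reset(); // start from a clean state, as TailFile does after rotation
        try (InputStream in = new CheckedInputStream(new FileInputStream(path), checksum)) {
            long remaining = length;
            byte[] buffer = new byte[8192];
            while (remaining > 0) {
                int read = in.read(buffer, 0, (int) Math.min(buffer.length, remaining));
                if (read < 0) {
                    break;
                }
                remaining -= read;
            }
        }
        return checksum.getValue();
    }

    public static void main(String[] args) throws IOException {
        long expected = checksumPrefix("app.log", 1024);   // checksum saved before rotation (illustrative)
        long actual = checksumPrefix("app.log.1", 1024);   // candidate rolled-over file
        System.out.println(expected == actual
                ? "Same file; resume from saved position"
                : "Different file; re-read from the beginning");
    }
}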