Example usage for org.apache.commons.codec.digest DigestUtils md5

Introduction

On this page you can find example usage for org.apache.commons.codec.digest DigestUtils md5.

Prototype

public static byte[] md5(String data) 
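
The digest is returned as a raw 16-element byte array, so callers usually encode it before display or storage. A minimal sketch of the call (the class name and input string are illustrative):

import org.apache.commons.codec.binary.Hex;
import org.apache.commons.codec.digest.DigestUtils;

public class Md5Demo {
    public static void main(String[] args) {
        // md5(String) hashes the UTF-8 bytes of the input into 16 raw bytes
        byte[] digest = DigestUtils.md5("hello world");
        // Hex.encodeHexString gives the familiar 32-char lowercase form;
        // DigestUtils.md5Hex("hello world") is a one-call shortcut
        System.out.println(Hex.encodeHexString(digest)); // 5eb63bbbe01eeed093cb22bb8f5acdc3
    }
}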

Usage

From source file: org.jajuk.services.core.PersistenceService.java

private String getQueueModelChecksum() {
    StringBuilder sb = new StringBuilder();
    for (StackItem item : QueueModel.getQueue()) {
        sb.append(item.getFile().getID());
    }
    // Do not use the MD5Processor class here, to avoid the intern() method
    // that could create a permgen memory leak
    byte[] checksum = DigestUtils.md5(sb.toString());
    // Note: new String(byte[]) decodes the raw digest with the platform
    // charset; the value serves only as an opaque change-detection token
    return new String(checksum);
}
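
If the checksum ever needs to be printable or charset-independent, hex encoding avoids the lossy new String(byte[]) conversion while still sidestepping intern(). A hedged variant (not the original Jajuk code):

private String getQueueModelChecksumHex() {
    StringBuilder sb = new StringBuilder();
    for (StackItem item : QueueModel.getQueue()) {
        sb.append(item.getFile().getID());
    }
    // md5Hex returns a stable 32-character lowercase hex string
    return DigestUtils.md5Hex(sb.toString());
}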

From source file: org.mapsforge.map.writer.automatization.OsmosisRunner.java

private static void createMD5Files() throws IOException {
    // Generate a companion .md5 file for each file in MD5LIST
    for (final String file : MD5LIST) {
        File mapFile = new File(file);
        File md5File = new File(mapFile.getParentFile(), mapFile.getName() + ".md5");

        // try-with-resources closes the stream even if hashing fails;
        // DigestUtils.md5(InputStream) does not close it itself
        try (FileInputStream in = new FileInputStream(mapFile)) {
            byte[] md5 = DigestUtils.md5(in);
            FileUtils.writeByteArrayToFile(md5File, md5);
        }
    }
}

From source file: org.michaelevans.etsyblur.utils.Utils.java

private static File generateFile(Activity context) {
    String timestamp = String.valueOf(System.currentTimeMillis());
    // md5Hex yields a usable 32-char hex file name; String.valueOf(byte[])
    // would produce something like "[B@1d44bcfa" rather than the digest
    String md5 = DigestUtils.md5Hex(timestamp);
    File localFile = getCacheDir(context, "img_cache");
    if (!localFile.exists()) {
        localFile.mkdirs();
    }
    return new File(localFile, md5);
}

From source file: org.ros.internal.message.Md5Generator.java

public String generate(String messageType) {
    String messageDefinition = messageDefinitionProvider.get(messageType);
    Preconditions.checkNotNull(messageDefinition, "No definition for message type: " + messageType);
    List<String> parts = MessageDefinitionTupleParser.parse(messageDefinition, -1);
    StringBuilder text = new StringBuilder();
    for (String part : parts) {
        text.append(generateText(messageType, part));
    }
    byte[] md5 = DigestUtils.md5(text.toString());
    return new String(encodeHex(md5, DIGITS_LOWER));
}

From source file: org.saadahmed.snowcrystal.SnowCrystal.java

public static String md5Base64UrlSafe() {
    return Base64.encodeBase64URLSafeString(DigestUtils.md5(SnowCrystal.newId().unwrap()));
}
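
Since an MD5 digest is always 16 bytes, the URL-safe Base64 string above is always 22 characters (padding is omitted), which makes it a fixed-width identifier. A quick check, assuming the same imports as the snippet:

// 16 bytes -> 24 Base64 chars, minus 2 padding chars = 22
String id = Base64.encodeBase64URLSafeString(DigestUtils.md5("any input"));
assert id.length() == 22;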

From source file: org.sonar.batch.index.SourcePersisterTest.java

private byte[] md5(String string) {
    return DigestUtils.md5(string);
}

From source file: org.sonar.batch.scan.filesystem.InputPathCacheTest.java

@Test
public void should_add_input_file() throws Exception {
    InputPathCache cache = new InputPathCache(caches);
    DefaultInputFile fooFile = new DefaultInputFile("foo", "src/main/java/Foo.java")
            .setFile(temp.newFile("Foo.java"));
    cache.put("struts", fooFile);
    cache.put("struts-core",
            new DeprecatedDefaultInputFile("foo", "src/main/java/Bar.java").setBasedir(temp.newFolder())
                    .setDeprecatedKey("foo").setSourceDirAbsolutePath("foo").setPathRelativeToSourceDir("foo")
                    .setLanguage("bla").setType(Type.MAIN).setStatus(Status.ADDED).setHash("xyz").setLines(2)
                    .setEncoding("UTF-8").setOriginalLineOffsets(new long[] { 0, 4 })
                    .setLineHashes(new byte[][] { DigestUtils.md5("foo"), DigestUtils.md5("bar") })
                    .setFile(temp.newFile("Bar.java")));

    DefaultInputFile loadedFile = (DefaultInputFile) cache.getFile("struts-core", "src/main/java/Bar.java");
    assertThat(loadedFile.relativePath()).isEqualTo("src/main/java/Bar.java");
    assertThat(loadedFile.encoding()).isEqualTo("UTF-8");
    assertThat(loadedFile.originalLineOffsets()).containsOnly(0, 4);
    assertThat(loadedFile.lineHashes()[0]).containsOnly(DigestUtils.md5("foo"));
    assertThat(loadedFile.lineHashes()[1]).containsOnly(DigestUtils.md5("bar"));

    assertThat(cache.filesByModule("struts")).hasSize(1);
    assertThat(cache.filesByModule("struts-core")).hasSize(1);
    assertThat(cache.all()).hasSize(2);
    for (InputPath inputPath : cache.all()) {
        assertThat(inputPath.relativePath()).startsWith("src/main/java/");
    }

    cache.remove("struts", fooFile);
    assertThat(cache.all()).hasSize(1);

    cache.removeModule("struts");
    assertThat(cache.filesByModule("struts")).hasSize(0);
    assertThat(cache.filesByModule("struts-core")).hasSize(1);
    assertThat(cache.all()).hasSize(1);
}

From source file: org.sonar.plugins.core.issue.IssueTrackingDecoratorTest.java

private byte[][] computeHashes(String source) {
    String[] lines = source.split("\n");
    byte[][] hashes = new byte[lines.length][];
    for (int i = 0; i < lines.length; i++) {
        hashes[i] = DigestUtils.md5(lines[i].replaceAll("[\t ]", ""));
    }
    return hashes;
}
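
As the enclosing test's name suggests, these whitespace-insensitive line hashes let issue tracking survive pure reformatting: a line keeps its hash when only tabs and spaces change. A hedged usage sketch (java.util.Arrays assumed imported):

// Two versions of the same line differing only in whitespace
byte[][] v1 = computeHashes("int a=1;\nint b=2;");
byte[][] v2 = computeHashes("int  a = 1;\nint b=2;");
// Equal hashes let the tracker re-attach issues after a reformat
assert Arrays.equals(v1[0], v2[0]);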

From source file: org.stem.CompactionTest

@Test
public void testCompaction() throws Exception {
    clusterManagerClient.computeMapping();
    StemClient client = new StemClient();
    client.start();

    DataTracker dt = getFirstDisk().getDataTracker();

    final int BLOBS_NUM = 79 + 79 + 79 + 1; // a 5MB fat file can hold 79 blobs of 65536 bytes each
    final int BLOB_SIZE = 65536;

    assert dt.getTotalBlobs() == 0;
    assert dt.getLiveBlobs() == 0;
    assert dt.getLiveSizeInBytes() == 0;
    assert dt.getTotalSizeInBytes() == 0;
    assert dt.getDeletesCount() == 0;
    assert dt.getDeletesCount(0) == 0;
    assert dt.getDeletesCount(1) == 0;
    assert dt.getDeletesCount(2) == 0;
    assert dt.getDeletesCount(3) == 0;
    assert dt.getDeletesSizeInBytes(0) == 0;
    assert dt.getDeletesSizeInBytes(1) == 0;
    assert dt.getDeletesSizeInBytes(2) == 0;
    assert dt.getDeletesSizeInBytes(3) == 0;
    assert dt.getDeletesSizeInBytes() == 0;
    assert dt.getBlankFatFileCount() == 20;
    assert dt.getFullFatFileCount() == 0;
    assert dt.getFatFileCount() == 20;
    assert getWriteCandidates() == 19;

    // Perform WRITE
    List<byte[]> keysGenerated = generateRandomLoad(BLOBS_NUM); // put 238 blobs
    byte[] before = client.get(keysGenerated.get(96));

    assert dt.getTotalBlobs() == BLOBS_NUM;
    assert dt.getLiveBlobs() == BLOBS_NUM;
    assert dt.getLiveSizeInBytes() == BLOBS_NUM * BLOB_SIZE;
    assert dt.getTotalSizeInBytes() == BLOBS_NUM * BLOB_SIZE;
    assert dt.getDeletesCount() == 0;
    assert dt.getDeletesCount(0) == 0;
    assert dt.getDeletesCount(1) == 0;
    assert dt.getDeletesCount(2) == 0;
    assert dt.getDeletesCount(3) == 0;
    assert dt.getDeletesSizeInBytes(0) == 0;
    assert dt.getDeletesSizeInBytes(1) == 0;
    assert dt.getDeletesSizeInBytes(2) == 0;
    assert dt.getDeletesSizeInBytes(3) == 0;
    assert dt.getDeletesSizeInBytes() == 0;
    assert dt.getBlankFatFileCount() == 16;
    assert dt.getFullFatFileCount() == 3;
    assert dt.getFatFileCount() == 20;
    assert getWriteCandidates() == 16;

    int deletes = (int) Math.ceil(BLOBS_NUM * StorageNodeDescriptor.getCompactionThreshold() * 4);
    // Delete some data
    for (int i = 0; i < deletes; i++) // Delete 40% of data
    {
        client.delete(keysGenerated.get(i));
    }

    byte[] before2 = client.get(keysGenerated.get(96));

    assert dt.getTotalBlobs() == BLOBS_NUM;
    assert dt.getLiveBlobs() == BLOBS_NUM - deletes;
    assert dt.getLiveSizeInBytes() == (BLOBS_NUM - deletes) * BLOB_SIZE;
    assert dt.getTotalSizeInBytes() == BLOBS_NUM * BLOB_SIZE;
    assert dt.getDeletesCount() == deletes;
    assert dt.getDeletesCount(0) == 79;
    assert dt.getDeletesCount(1) == 96 - 79;
    assert dt.getDeletesCount(2) == 0;
    assert dt.getDeletesCount(3) == 0;
    assert dt.getDeletesSizeInBytes(0) == 79 * BLOB_SIZE;
    assert dt.getDeletesSizeInBytes(1) == (96 - 79) * BLOB_SIZE;
    assert dt.getDeletesSizeInBytes(2) == 0;
    assert dt.getDeletesSizeInBytes(3) == 0;
    assert dt.getDeletesSizeInBytes() == deletes * BLOB_SIZE;
    assert dt.getBlankFatFileCount() == 16;
    assert dt.getFullFatFileCount() == 3;
    assert dt.getFatFileCount() == 20;
    assert getWriteCandidates() == 16;

    // Perform compaction
    CompactionManager.instance.performMajorCompaction();
    byte[] after = client.get(keysGenerated.get(96));

    assert dt.getTotalBlobs() == BLOBS_NUM;
    assert dt.getLiveBlobs() == BLOBS_NUM - deletes;
    assert dt.getLiveSizeInBytes() == (BLOBS_NUM - deletes) * BLOB_SIZE;
    assert dt.getDeletesCount() == 0;
    assert dt.getDeletesCount(0) == 0;
    assert dt.getDeletesCount(1) == 0;
    assert dt.getDeletesCount(2) == 0;
    assert dt.getDeletesCount(3) == 0;
    assert dt.getDeletesSizeInBytes(0) == 0;
    assert dt.getDeletesSizeInBytes(1) == 0;
    assert dt.getDeletesSizeInBytes(2) == 0;
    assert dt.getDeletesSizeInBytes(3) == 0;
    assert dt.getDeletesSizeInBytes() == 0;
    assert dt.getBlankFatFileCount() == 18;
    assert dt.getFullFatFileCount() == 1;
    assert dt.getFatFileCount() == 20;
    assert getWriteCandidates() == 18;

    // Control validation. Read all the dataset we put
    for (int i = 0; i < BLOBS_NUM; i++) {
        byte[] keyOrig = keysGenerated.get(i);
        byte[] data = client.get(keyOrig);
        if (i < deletes) {
            assert data == null;
            continue;
        }

        byte[] keyActual = DigestUtils.md5(data);
        Assert.assertArrayEquals(keyActual, keyOrig);
    }

    // If we get here, the data has been migrated correctly: we gained two new
    // BLANK files for writes and none of the data was corrupted
}

From source file: org.stem.IntegrationTestBase.java

protected WriteBlobMessage getRandomWriteMessage() {
    byte[] blob = TestUtils.generateRandomBlob(65536);
    byte[] key = DigestUtils.md5(blob);
    UUID disk = getFirstDiskUUID();

    WriteBlobMessage op = new WriteBlobMessage();
    op.disk = disk;
    op.key = key;
    op.blob = blob;

    return op;
}
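
Using the blob's own MD5 digest as its key makes the store content-addressed, which is exactly what the compaction test above verifies when it re-hashes fetched data. The invariant, as a one-line sketch (java.util.Arrays assumed imported):

// Content-addressing invariant: the key is the MD5 of the blob,
// so any reader can detect corruption by re-hashing
assert Arrays.equals(DigestUtils.md5(op.blob), op.key);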