Example usage for java.util Map clear

List of usage examples for java.util Map clear

Introduction

On this page you can find example usage for java.util.Map.clear().

Prototype

void clear();

Document

Removes all of the mappings from this map (optional operation).
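
A minimal, self-contained sketch (not taken from any of the source files below) illustrating this contract: clear() removes every mapping in place, and because it is an optional operation, unmodifiable maps reject it with an UnsupportedOperationException.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class MapClearExample {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        counts.put("a", 1);
        counts.put("b", 2);

        counts.clear();                       // removes every mapping in place
        System.out.println(counts.isEmpty()); // true

        // clear() is an optional operation: unmodifiable views always reject it
        Map<String, Integer> readOnly = Collections.unmodifiableMap(new HashMap<>(counts));
        try {
            readOnly.clear();
        } catch (UnsupportedOperationException e) {
            System.out.println("clear() is not supported on an unmodifiable map");
        }
    }
}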

Usage

From source file:org.wikipedia.nirvana.statistics.Rating.java

private void calcProgress() {
    Path startingDir = Paths.get(Statistics.cacheFolder);
    String pattern = FileTools.normalizeFileName(portal) + "." + this.type + ".????-??-??.js";

    Finder finder = new Finder(pattern);
    try {
        Files.walkFileTree(startingDir, EnumSet.noneOf(FileVisitOption.class), 1, finder);
    } catch (IOException e) {
        log.error(e.toString());
        e.printStackTrace();
    }
    String file = finder.getNewestFile();
    Map<String, Integer> data = new HashMap<String, Integer>(30);

    if (file != null) {

        ObjectMapper mapper = new ObjectMapper();
        //List<ArchiveItem> list = null;
        //          File file = new File(prevResultFile);
        //          if(!file.exists()) {
        //             log.warn("file "+dbPath+" does not exist");
        //             return;
        //          }
        try {
            data = mapper.readValue(new File(startingDir + "\\" + file),
                    new TypeReference<Map<String, Integer>>() {
                    });
        } catch (JsonParseException e) {
            log.error(e);
        } catch (JsonMappingException e) {
            log.error(e);
        } catch (IOException e) {
            log.error(e);
        }
        if (data != null) {
            for (StatItem item : this.items) {
                Integer n = data.get(item.user);
                if (n != null) {
                    item.progress = -(item.number - n); /// smaller value means progress
                }
            }
        }
    }

    data.clear();
    for (StatItem item : this.items) {
        data.put(item.user, item.number);
    }
    file = Statistics.cacheFolder + "\\" + String.format("%1$s.%2$s.%3$tF.js",
            FileTools.normalizeFileName(Statistics.portal), type, Calendar.getInstance());
    ObjectMapper mapper = new ObjectMapper();
    try {
        mapper.writeValue(new File(file), data);
    } catch (JsonParseException e) {
        log.error(e);
        return;
    } catch (JsonMappingException e) {
        log.error(e);
        return;
    } catch (IOException e) {
        log.error(e);
        return;
    }

}

From source file:com.amazonaws.services.kinesis.aggregators.StreamAggregatorUtils.java

/**
 * Get a list of all Open shards ordered by their start hash
 *
 * @param streamName
 * @return A Map of only Open Shards indexed by the Shard ID
 */
public static Map<String, Shard> getOpenShards(AmazonKinesisClient kinesisClient, String streamName)
        throws Exception {
    Map<String, Shard> shardMap = new LinkedHashMap<>();
    final int BACKOFF_MILLIS = 10;
    final int MAX_DESCRIBE_ATTEMPTS = 10;
    int describeAttempts = 0;
    StreamDescription stream = null;
    try {
        do {
            try {
                stream = kinesisClient.describeStream(streamName).getStreamDescription();
            } catch (LimitExceededException e) {
                // note: '^' is bitwise XOR in Java, not exponentiation, so this delay does not actually grow exponentially
                Thread.sleep(2 ^ describeAttempts * BACKOFF_MILLIS);
                describeAttempts++;
            }
        } while (stream == null && describeAttempts < MAX_DESCRIBE_ATTEMPTS);
    } catch (InterruptedException e) {
        LOG.error(e);
        throw e;
    }

    if (stream == null) {
        throw new Exception(
                String.format("Unable to describe Stream after %s attempts", MAX_DESCRIBE_ATTEMPTS));
    }
    Collection<String> openShardNames = new ArrayList<String>();

    // load all the shards on the stream
    for (Shard shard : stream.getShards()) {
        openShardNames.add(shard.getShardId());
        shardMap.put(shard.getShardId(), shard);

        // remove this shard's parents from the set of active shards -
        // we can't do anything to them
        if (shard.getParentShardId() != null) {
            openShardNames.remove(shard.getParentShardId());
        }
        if (shard.getAdjacentParentShardId() != null) {
            openShardNames.remove(shard.getAdjacentParentShardId());
        }
    }

    // create a List of Open shards for sorting
    List<Shard> shards = new ArrayList<Shard>();
    for (String s : openShardNames) {
        shards.add(shardMap.get(s));
    }

    // sort the list into lowest start hash order
    Collections.sort(shards, new Comparator<Shard>() {
        public int compare(Shard o1, Shard o2) {
            return new BigInteger(o1.getHashKeyRange().getStartingHashKey())
                    .compareTo(new BigInteger(o2.getHashKeyRange().getStartingHashKey()));
        }
    });

    // rebuild the shard map into the correct order
    shardMap.clear();
    for (Shard s : shards) {
        shardMap.put(s.getShardId(), s);
    }

    return shardMap;

}
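
The method above relies on clear() to rebuild a LinkedHashMap so that its iteration order matches the freshly sorted shard list. A generic sketch of the same clear-and-rebuild pattern, using hypothetical string keys and integer values rather than the Kinesis Shard type:

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClearAndReorderExample {
    public static void main(String[] args) {
        // LinkedHashMap iterates in insertion order, so reordering means re-inserting
        Map<String, Integer> byId = new LinkedHashMap<>();
        byId.put("c", 3);
        byId.put("a", 1);
        byId.put("b", 2);

        // copy the entries into detached pairs, then sort them outside the map
        List<Map.Entry<String, Integer>> entries = new ArrayList<>();
        for (Map.Entry<String, Integer> e : byId.entrySet()) {
            entries.add(new AbstractMap.SimpleEntry<>(e.getKey(), e.getValue()));
        }
        entries.sort(Map.Entry.comparingByValue());

        // rebuild the map into the correct order
        byId.clear();
        for (Map.Entry<String, Integer> e : entries) {
            byId.put(e.getKey(), e.getValue());
        }

        System.out.println(byId); // {a=1, b=2, c=3}
    }
}

Building a new LinkedHashMap would work just as well; clearing and refilling the existing map simply keeps the same reference that callers already hold.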

From source file:com.ibm.jaggr.core.impl.transport.RequestedModuleNamesTest.java

@SuppressWarnings("deprecation")
@Test
public void testRequestedModuleNamesExceptions() throws Exception {
    IAggregator mockAggregator = TestUtils.createMockAggregator();
    Map<String, Object> requestAttributes = new HashMap<String, Object>();
    Map<String, String[]> requestParams = new HashMap<String, String[]>();
    HttpServletRequest request = TestUtils.createMockRequest(mockAggregator, requestAttributes, requestParams,
            null, null);
    EasyMock.replay(mockAggregator, request);

    // test exceptions with scripts param
    requestParams.put(AbstractHttpTransport.SCRIPTS_REQPARAM, new String[] { "script/a" });
    requestParams.put(AbstractHttpTransport.REQUESTEDMODULES_REQPARAM, new String[] { "module/a" });
    try {
        new RequestedModuleNames(request, null, null);
        fail("Expected exception");
    } catch (BadRequestException ex) {
    }
    requestParams.remove(AbstractHttpTransport.REQUESTEDMODULES_REQPARAM);
    requestParams.put(AbstractHttpTransport.REQUIRED_REQPARAM, new String[] { "required/a" });
    try {
        new RequestedModuleNames(request, null, null);
        fail("Expected exception");
    } catch (BadRequestException ex) {
    }

    // test exceptions with deps param
    requestAttributes.clear();
    requestAttributes.put(IAggregator.AGGREGATOR_REQATTRNAME, mockAggregator);
    requestParams.clear();
    requestParams.put(AbstractHttpTransport.DEPS_REQPARAM, new String[] { "deps/a" });
    requestParams.put(AbstractHttpTransport.REQUESTEDMODULES_REQPARAM, new String[] { "module/a" });
    try {
        new RequestedModuleNames(request, null, null);
        fail("Expected exception");
    } catch (BadRequestException ex) {
    }
    requestParams.remove(AbstractHttpTransport.REQUESTEDMODULES_REQPARAM);
    requestParams.put(AbstractHttpTransport.REQUIRED_REQPARAM, new String[] { "required/a" });
    try {
        new RequestedModuleNames(request, null, null);
        fail("Expected exception");
    } catch (BadRequestException ex) {
    }

    // test exceptions with preloads param
    requestAttributes.clear();
    requestAttributes.put(IAggregator.AGGREGATOR_REQATTRNAME, mockAggregator);
    requestParams.clear();
    requestParams.put(AbstractHttpTransport.PRELOADS_REQPARAM, new String[] { "preloads/a" });
    requestParams.put(AbstractHttpTransport.REQUESTEDMODULES_REQPARAM, new String[] { "module/a" });
    try {
        new RequestedModuleNames(request, null, null);
        fail("Expected exception");
    } catch (BadRequestException ex) {
    }
    requestParams.remove(AbstractHttpTransport.REQUESTEDMODULES_REQPARAM);
    requestParams.put(AbstractHttpTransport.REQUIRED_REQPARAM, new String[] { "required/a" });
    try {
        new RequestedModuleNames(request, null, null);
        fail("Expected exception");
    } catch (BadRequestException ex) {
    }

}

From source file:pl.edu.icm.coansys.deduplication.document.DuplicateWorkDetectReduceService.java

/**
 * Splits the passed documents into smaller parts. The documents are divided into smaller packs according to the generated keys.
 * The keys are generated by using the {@link WorkKeyGenerator.generateKey(doc, level)} method.
 */
Map<Text, List<DocumentProtos.DocumentMetadata>> splitDocuments(Text key,
        List<DocumentProtos.DocumentMetadata> documents, int level) {

    // check if set was forced to split; if yes, keep the suffix
    String keyStr = key.toString();
    String suffix = "";
    if (keyStr.contains("-")) {
        String[] parts = keyStr.split("-");
        suffix = parts[1];
    }

    Map<Text, List<DocumentProtos.DocumentMetadata>> splitDocuments = Maps.newHashMap();
    for (DocumentProtos.DocumentMetadata doc : documents) {
        String newKeyStr = keyGen.generateKey(doc, level);
        if (!suffix.isEmpty()) {
            newKeyStr = newKeyStr + "-" + suffix;
        }
        Text newKey = new Text(newKeyStr);
        List<DocumentProtos.DocumentMetadata> list = splitDocuments.get(newKey);
        if (list == null) {
            list = Lists.newArrayList();
            splitDocuments.put(newKey, list);
        }
        list.add(doc);
    }

    if (level > maxSplitLevel && splitDocuments.size() == 1) {
        //force split into 2 parts
        Text commonKey = splitDocuments.keySet().iterator().next();
        String commonKeyStr = commonKey.toString();
        if (!commonKeyStr.contains("-")) {
            commonKeyStr += "-";
        }
        Text firstKey = new Text(commonKeyStr + "0");
        Text secondKey = new Text(commonKeyStr + "1");
        List<DocumentProtos.DocumentMetadata> fullList = splitDocuments.get(commonKey);
        int items = fullList.size();
        List<DocumentProtos.DocumentMetadata> firstHalf = fullList.subList(0, items / 2);
        List<DocumentProtos.DocumentMetadata> secondHalf = fullList.subList(items / 2, items);
        splitDocuments.clear();
        splitDocuments.put(firstKey, firstHalf);
        splitDocuments.put(secondKey, secondHalf);
    }

    return splitDocuments;
}

From source file:com.espertech.esper.core.service.EPRuntimeImpl.java

private void processScheduleHandles(ArrayBackedCollection<ScheduleHandle> handles) {
    if (ThreadLogUtil.ENABLED_TRACE) {
        ThreadLogUtil.trace("Found schedules for", handles.size());
    }

    if (handles.size() == 0) {
        return;
    }

    // handle 1 result separately for performance reasons
    if (handles.size() == 1) {
        Object[] handleArray = handles.getArray();
        EPStatementHandleCallback handle = (EPStatementHandleCallback) handleArray[0];

        if ((MetricReportingPath.isMetricsEnabled)
                && (handle.getAgentInstanceHandle().getStatementHandle().getMetricsHandle().isEnabled())) {
            long cpuTimeBefore = MetricUtil.getCPUCurrentThread();
            long wallTimeBefore = MetricUtil.getWall();

            processStatementScheduleSingle(handle, services, engineFilterAndDispatchTimeContext);

            long wallTimeAfter = MetricUtil.getWall();
            long cpuTimeAfter = MetricUtil.getCPUCurrentThread();
            long deltaCPU = cpuTimeAfter - cpuTimeBefore;
            long deltaWall = wallTimeAfter - wallTimeBefore;
            services.getMetricsReportingService().accountTime(
                    handle.getAgentInstanceHandle().getStatementHandle().getMetricsHandle(), deltaCPU,
                    deltaWall, 1);
        } else {
            if ((ThreadingOption.isThreadingEnabled) && (services.getThreadingService().isTimerThreading())) {
                services.getThreadingService().submitTimerWork(
                        new TimerUnitSingle(services, this, handle, this.engineFilterAndDispatchTimeContext));
            } else {
                processStatementScheduleSingle(handle, services, engineFilterAndDispatchTimeContext);
            }
        }

        handles.clear();
        return;
    }

    Object[] matchArray = handles.getArray();
    int entryCount = handles.size();

    // sort multiple matches for the event into statements
    Map<EPStatementAgentInstanceHandle, Object> stmtCallbacks = schedulePerStmtThreadLocal.get();
    stmtCallbacks.clear();
    for (int i = 0; i < entryCount; i++) // need to use the size of the collection
    {
        EPStatementHandleCallback handleCallback = (EPStatementHandleCallback) matchArray[i];
        EPStatementAgentInstanceHandle handle = handleCallback.getAgentInstanceHandle();
        ScheduleHandleCallback callback = handleCallback.getScheduleCallback();

        Object entry = stmtCallbacks.get(handle);

        // This statement has not been encountered before
        if (entry == null) {
            stmtCallbacks.put(handle, callback);
            continue;
        }

        // This statement has been encountered once before
        if (entry instanceof ScheduleHandleCallback) {
            ScheduleHandleCallback existingCallback = (ScheduleHandleCallback) entry;
            ArrayDeque<ScheduleHandleCallback> entries = new ArrayDeque<ScheduleHandleCallback>();
            entries.add(existingCallback);
            entries.add(callback);
            stmtCallbacks.put(handle, entries);
            continue;
        }

        // This statement has been encountered more than once before
        ArrayDeque<ScheduleHandleCallback> entries = (ArrayDeque<ScheduleHandleCallback>) entry;
        entries.add(callback);
    }
    handles.clear();

    for (Map.Entry<EPStatementAgentInstanceHandle, Object> entry : stmtCallbacks.entrySet()) {
        EPStatementAgentInstanceHandle handle = entry.getKey();
        Object callbackObject = entry.getValue();

        if ((MetricReportingPath.isMetricsEnabled)
                && (handle.getStatementHandle().getMetricsHandle().isEnabled())) {
            long cpuTimeBefore = MetricUtil.getCPUCurrentThread();
            long wallTimeBefore = MetricUtil.getWall();

            processStatementScheduleMultiple(handle, callbackObject, services,
                    this.engineFilterAndDispatchTimeContext);

            long wallTimeAfter = MetricUtil.getWall();
            long cpuTimeAfter = MetricUtil.getCPUCurrentThread();
            long deltaCPU = cpuTimeAfter - cpuTimeBefore;
            long deltaWall = wallTimeAfter - wallTimeBefore;
            int numInput = (callbackObject instanceof Collection) ? ((Collection) callbackObject).size() : 1;
            services.getMetricsReportingService().accountTime(handle.getStatementHandle().getMetricsHandle(),
                    deltaCPU, deltaWall, numInput);
        } else {
            if ((ThreadingOption.isThreadingEnabled) && (services.getThreadingService().isTimerThreading())) {
                services.getThreadingService().submitTimerWork(new TimerUnitMultiple(services, this, handle,
                        callbackObject, this.engineFilterAndDispatchTimeContext));
            } else {
                processStatementScheduleMultiple(handle, callbackObject, services,
                        this.engineFilterAndDispatchTimeContext);
            }
        }

        if ((isPrioritized) && (handle.isPreemptive())) {
            break;
        }
    }
}
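
In this method clear() lets a per-thread scratch map (schedulePerStmtThreadLocal) be reused across timer invocations instead of being reallocated each time. A minimal sketch of that reuse pattern, with a hypothetical grouping method that is not part of the Esper API:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ThreadLocalScratchMapExample {
    // one scratch map per thread, allocated once and reused
    private static final ThreadLocal<Map<String, List<Integer>>> SCRATCH =
            ThreadLocal.withInitial(HashMap::new);

    // hypothetical grouping step: buckets values by key, reusing the scratch map
    static Map<String, List<Integer>> groupByKey(List<String> keys, List<Integer> values) {
        Map<String, List<Integer>> buckets = SCRATCH.get();
        buckets.clear(); // wipe leftovers from the previous call on this thread
        for (int i = 0; i < keys.size(); i++) {
            buckets.computeIfAbsent(keys.get(i), k -> new ArrayList<>()).add(values.get(i));
        }
        return buckets;
    }

    public static void main(String[] args) {
        System.out.println(groupByKey(List.of("a", "b", "a"), List.of(1, 2, 3))); // groups: a=[1, 3], b=[2]
        System.out.println(groupByKey(List.of("x"), List.of(9)));                 // previous content cleared: x=[9]
    }
}

As in the Esper code, the returned map is only valid until the next call on the same thread; that is the trade-off for avoiding an allocation per invocation.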

From source file:org.apache.nifi.processors.standard.GetHTTP.java

private void updateStateMap(ProcessContext context, HttpResponse response, StateMap beforeStateMap,
        String url) {
    try {
        Map<String, String> workingMap = new HashMap<>();
        workingMap.putAll(beforeStateMap.toMap());
        final StateManager stateManager = context.getStateManager();
        StateMap oldValue = beforeStateMap;

        long currentTime = System.currentTimeMillis();

        final Header receivedLastModified = response.getFirstHeader(HEADER_LAST_MODIFIED);
        if (receivedLastModified != null) {
            workingMap.put(LAST_MODIFIED + ":" + url, currentTime + ":" + receivedLastModified.getValue());
        }

        final Header receivedEtag = response.getFirstHeader(HEADER_ETAG);
        if (receivedEtag != null) {
            workingMap.put(ETAG + ":" + url, currentTime + ":" + receivedEtag.getValue());
        }

        boolean replaceSucceeded = stateManager.replace(oldValue, workingMap, Scope.LOCAL);
        boolean changed;

        while (!replaceSucceeded) {
            oldValue = stateManager.getState(Scope.LOCAL);
            workingMap.clear();
            workingMap.putAll(oldValue.toMap());

            changed = false;

            if (receivedLastModified != null) {
                Tuple<String, String> storedLastModifiedTuple = parseStateValue(
                        workingMap.get(LAST_MODIFIED + ":" + url));

                if (Long.parseLong(storedLastModifiedTuple.getKey()) < currentTime) {
                    workingMap.put(LAST_MODIFIED + ":" + url,
                            currentTime + ":" + receivedLastModified.getValue());
                    changed = true;
                }
            }

            if (receivedEtag != null) {
                Tuple<String, String> storedLastModifiedTuple = parseStateValue(
                        workingMap.get(ETAG + ":" + url));

                if (Long.parseLong(storedLastModifiedTuple.getKey()) < currentTime) {
                    workingMap.put(ETAG + ":" + url, currentTime + ":" + receivedEtag.getValue());
                    changed = true;
                }
            }

            if (changed) {
                replaceSucceeded = stateManager.replace(oldValue, workingMap, Scope.LOCAL);
            } else {
                break;
            }
        }
    } catch (final IOException ioe) {
        throw new ProcessException(ioe);
    }
}
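
The retry loop above clears and repopulates workingMap from the latest stored state whenever the optimistic replace fails. A stripped-down sketch of that pattern, using an AtomicReference as a stand-in for NiFi's StateManager:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

public class ClearAndRetryExample {
    public static void main(String[] args) {
        // stand-in for an external state store with compare-and-swap semantics
        AtomicReference<Map<String, String>> store =
                new AtomicReference<>(Collections.singletonMap("etag", "v1"));

        Map<String, String> snapshot = store.get();
        Map<String, String> working = new HashMap<>(snapshot);
        working.put("etag", "v2");

        // retry until the swap succeeds, refreshing the working copy each time
        while (!store.compareAndSet(snapshot, new HashMap<>(working))) {
            snapshot = store.get();
            working.clear();           // drop the stale working copy ...
            working.putAll(snapshot);  // ... rebuild it from the latest state ...
            working.put("etag", "v2"); // ... and reapply the update
        }
        System.out.println(store.get()); // {etag=v2}
    }
}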

From source file:org.springframework.cloud.deployer.spi.kubernetes.DefaultContainerFactoryTests.java

@Test
public void createWithVolumeMounts() {
    // test volume mounts defined as deployer properties
    KubernetesDeployerProperties kubernetesDeployerProperties = new KubernetesDeployerProperties();
    DefaultContainerFactory defaultContainerFactory = new DefaultContainerFactory(kubernetesDeployerProperties);

    AppDefinition definition = new AppDefinition("app-test", null);
    Resource resource = getResource();
    Map<String, String> props = new HashMap<>();
    props.put("spring.cloud.deployer.kubernetes.volumeMounts",
            "[" + "{name: 'testhostpath', mountPath: '/test/hostPath'}, "
                    + "{name: 'testpvc', mountPath: '/test/pvc', readOnly: 'true'}, "
                    + "{name: 'testnfs', mountPath: '/test/nfs'}" + "]");
    AppDeploymentRequest appDeploymentRequest = new AppDeploymentRequest(definition, resource, props);

    Container container = defaultContainerFactory.create("app-test", appDeploymentRequest, null, null);

    assertThat(container.getVolumeMounts()).containsOnly(
            new VolumeMount("/test/hostPath", "testhostpath", null, null),
            new VolumeMount("/test/pvc", "testpvc", true, null),
            new VolumeMount("/test/nfs", "testnfs", null, null));

    // test volume mounts defined as app deployment property, overriding the deployer property
    kubernetesDeployerProperties = new KubernetesDeployerProperties();
    kubernetesDeployerProperties
            .setVolumeMounts(Stream.of(new VolumeMount("/test/hostPath", "testhostpath", false, null),
                    new VolumeMount("/test/pvc", "testpvc", true, null),
                    new VolumeMount("/test/nfs", "testnfs", false, null)).collect(Collectors.toList()));
    defaultContainerFactory = new DefaultContainerFactory(kubernetesDeployerProperties);

    props.clear();
    props.put("spring.cloud.deployer.kubernetes.volumeMounts",
            "[" + "{name: 'testpvc', mountPath: '/test/pvc/overridden'}, "
                    + "{name: 'testnfs', mountPath: '/test/nfs/overridden', readOnly: 'true'}" + "]");
    container = defaultContainerFactory.create("app-test", appDeploymentRequest, null, null);

    assertThat(container.getVolumeMounts()).containsOnly(
            new VolumeMount("/test/hostPath", "testhostpath", false, null),
            new VolumeMount("/test/pvc/overridden", "testpvc", null, null),
            new VolumeMount("/test/nfs/overridden", "testnfs", true, null));
}

From source file:co.cask.cdap.internal.app.services.http.handlers.PreferencesHttpHandlerTest.java

@Test
public void testProgram() throws Exception {
    deploy(WordCountApp.class, Constants.Gateway.API_VERSION_3_TOKEN, TEST_NAMESPACE2);
    Map<String, String> propMap = Maps.newHashMap();
    Assert.assertEquals(propMap,
            getProperty(getURI(TEST_NAMESPACE2, "WordCountApp", "flows", "WordCountFlow"), false, 200));
    getProperty(getURI(TEST_NAMESPACE2, "WordCountApp", "invalidType", "somename"), false, 400);
    getProperty(getURI(TEST_NAMESPACE2, "WordCountApp", "flows", "somename"), false, 404);
    propMap.put("k1", "k349*&#$");
    setProperty(getURI(TEST_NAMESPACE2, "WordCountApp", "flows", "WordCountFlow"), propMap, 200);
    Assert.assertEquals(propMap,
            getProperty(getURI(TEST_NAMESPACE2, "WordCountApp", "flows", "WordCountFlow"), false, 200));
    propMap.put("k1", "instance");
    setProperty(getURI(), propMap, 200);
    Assert.assertEquals(propMap, getProperty(getURI(), true, 200));
    propMap.put("k1", "k349*&#$");
    Assert.assertEquals(propMap,
            getProperty(getURI(TEST_NAMESPACE2, "WordCountApp", "flows", "WordCountFlow"), false, 200));
    deleteProperty(getURI(TEST_NAMESPACE2, "WordCountApp", "flows", "WordCountFlow"), 200);
    propMap.put("k1", "instance");
    Assert.assertEquals(0,
            getProperty(getURI(TEST_NAMESPACE2, "WordCountApp", "flows", "WordCountFlow"), false, 200).size());
    Assert.assertEquals(propMap,
            getProperty(getURI(TEST_NAMESPACE2, "WordCountApp", "flows", "WordCountFlow"), true, 200));
    deleteProperty(getURI(), 200);
    propMap.clear();
    Assert.assertEquals(propMap,
            getProperty(getURI(TEST_NAMESPACE2, "WordCountApp", "flows", "WordCountFlow"), false, 200));
    Assert.assertEquals(propMap, getProperty(getURI(), false, 200));
}

From source file:com.inmobi.databus.partition.TestPartitionReaderWithLeastFullCheckpoint.java

@Test
public void testReadFromLeastFullCheckpoint() throws Exception {
    buffer.clear();
    String fsUri = fs.getUri().toString();
    Map<Integer, PartitionCheckpoint> expectedDeltaPchk = new HashMap<Integer, PartitionCheckpoint>();
    PartitionReaderStatsExposer prMetrics = new PartitionReaderStatsExposer(testStream, "c1",
            partitionId.toString(), consumerNumber, fsUri);
    fs.delete(databusFiles[1], true);
    fs.mkdirs(databusFiles[1].getParent());
    fs.delete(databusFiles[2], true);
    fs.mkdirs(databusFiles[2].getParent());
    prepareCheckpoint(DatabusStreamWaitingReader.getHadoopStreamFile(fs.getFileStatus(databusFiles[0])), -1,
            databusFiles[0], partitionCheckpointList);
    PartitionReader preader = new PartitionReader(partitionId, partitionCheckpointList, fs, buffer, streamDir,
            conf, inputFormatClass, null, 1000, isDatabusData(), prMetrics, true, partitionMinList, null);
    preader.init();
    Assert.assertEquals(preader.getCurrentFile().toString(), getDateStringFromPath(databusFiles[3].toString()));
    preader.execute();
    Date fromTime = getTimeStampFromFile(databusFiles[0]);
    Date toTime = getTimeStampFromFile(databusFiles[3]);
    TestUtil.prepareExpectedDeltaPck(fromTime, toTime, expectedDeltaPchk, null, streamDir, partitionMinList,
            partitionCheckpointList, true, true);
    TestUtil.assertBuffer(DatabusStreamWaitingReader.getHadoopStreamFile(fs.getFileStatus(databusFiles[3])), 4,
            00, 100, partitionId, buffer, isDatabusData(), expectedDeltaPchk);
    expectedDeltaPchk.clear();
    fromTime = getTimeStampFromFile(databusFiles[3]);
    toTime = getTimeStampFromFile(databusFiles[4]);
    TestUtil.prepareExpectedDeltaPck(fromTime, toTime, expectedDeltaPchk, fs.getFileStatus(databusFiles[3]),
            streamDir, partitionMinList, partitionCheckpointList, false, false);
    TestUtil.assertBuffer(DatabusStreamWaitingReader.getHadoopStreamFile(fs.getFileStatus(databusFiles[4])), 5,
            00, 100, partitionId, buffer, isDatabusData(), expectedDeltaPchk);
    expectedDeltaPchk.clear();
    fromTime = getTimeStampFromFile(databusFiles[4]);
    toTime = getTimeStampFromFile(databusFiles[5]);
    TestUtil.prepareExpectedDeltaPck(fromTime, toTime, expectedDeltaPchk, fs.getFileStatus(databusFiles[4]),
            streamDir, partitionMinList, partitionCheckpointList, false, false);
    TestUtil.assertBuffer(DatabusStreamWaitingReader.getHadoopStreamFile(fs.getFileStatus(databusFiles[5])), 6,
            00, 100, partitionId, buffer, isDatabusData(), expectedDeltaPchk);
    Assert.assertEquals(prMetrics.getMessagesReadFromSource(), 300);
    Assert.assertEquals(prMetrics.getMessagesAddedToBuffer(), 300);
    Assert.assertEquals(prMetrics.getLatestMinuteAlreadyRead(),
            DatabusStreamWaitingReader.getDateFromStreamDir(streamDir, databusFiles[5]).getTime());
}