Example usage for java.util.concurrent BlockingQueue poll

Introduction

On this page you can find example usage of java.util.concurrent BlockingQueue poll.

Prototype

E poll();

Document

Retrieves and removes the head of this queue, or returns null if this queue is empty.
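
As a quick orientation before the project sources below, here is a minimal, self-contained sketch of the non-blocking poll() contract: unlike take(), poll() returns immediately and yields null when the queue is empty. The class name and element values are illustrative only.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class PollDemo {
    public static void main(String[] args) {
        // A small bounded queue (illustrative capacity).
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);
        queue.offer("first");
        queue.offer("second");

        // poll() retrieves and removes the head, or returns null if the queue is empty.
        System.out.println(queue.poll()); // prints "first"
        System.out.println(queue.poll()); // prints "second"
        System.out.println(queue.poll()); // prints "null" -- no blocking, no exception
    }
}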

Usage

From source file: org.apache.nifi.controller.repository.FileSystemRepository.java

private long destroyExpiredArchives(final String containerName, final Path container) throws IOException {
    archiveExpirationLog.debug("Destroying Expired Archives for Container {}", containerName);
    final List<ArchiveInfo> notYetExceedingThreshold = new ArrayList<>();
    long removalTimeThreshold = System.currentTimeMillis() - maxArchiveMillis;
    long oldestArchiveDateFound = System.currentTimeMillis();

    // determine how much space we must have in order to stop deleting old data
    final Long minRequiredSpace = minUsableContainerBytesForArchive.get(containerName);
    if (minRequiredSpace == null) {
        archiveExpirationLog
                .debug("Could not determine minimum required space so will not destroy any archived data");
        return -1L;
    }

    final long usableSpace = getContainerUsableSpace(containerName);
    final ContainerState containerState = containerStateMap.get(containerName);

    // First, delete files from our queue
    final long startNanos = System.nanoTime();
    final long toFree = minRequiredSpace - usableSpace;
    final BlockingQueue<ArchiveInfo> fileQueue = archivedFiles.get(containerName);
    if (archiveExpirationLog.isDebugEnabled()) {
        if (toFree < 0) {
            archiveExpirationLog.debug(
                    "Currently {} bytes free for Container {}; requirement is {} byte free, so no need to free space until an additional {} bytes are used",
                    usableSpace, containerName, minRequiredSpace, Math.abs(toFree));
        } else {
            archiveExpirationLog.debug(
                    "Currently {} bytes free for Container {}; requirement is {} byte free, so need to free {} bytes",
                    usableSpace, containerName, minRequiredSpace, toFree);
        }
    }

    ArchiveInfo toDelete;
    int deleteCount = 0;
    long freed = 0L;
    while ((toDelete = fileQueue.peek()) != null) {
        try {
            final long fileSize = toDelete.getSize();

            removalTimeThreshold = System.currentTimeMillis() - maxArchiveMillis;

            // we use fileQueue.peek above instead of fileQueue.poll() because we don't always want to
            // remove the head of the queue. Instead, we want to remove it only if we plan to delete it.
            // In order to accomplish this, we just peek at the head and check if it should be deleted.
            // If so, then we call poll() to remove it
            if (freed < toFree || getLastModTime(toDelete.toPath()) < removalTimeThreshold) {
                toDelete = fileQueue.poll(); // remove the head of the queue, which is already stored in 'toDelete'
                Files.deleteIfExists(toDelete.toPath());
                containerState.decrementArchiveCount();
                LOG.debug(
                        "Deleted archived ContentClaim with ID {} from Container {} because the archival size was exceeding the max configured size",
                        toDelete.getName(), containerName);
                freed += fileSize;
                deleteCount++;
            }

            // If we'd freed up enough space, we're done... unless the next file needs to be destroyed based on time.
            if (freed >= toFree) {
                // If the last mod time indicates that it should be removed, just continue loop.
                if (deleteBasedOnTimestamp(fileQueue, removalTimeThreshold)) {
                    archiveExpirationLog.debug(
                            "Freed enough space ({} bytes freed, needed to free {} bytes) but will continue to expire data based on timestamp",
                            freed, toFree);
                    continue;
                }

                archiveExpirationLog.debug(
                        "Freed enough space ({} bytes freed, needed to free {} bytes). Finished expiring data",
                        freed, toFree);

                final ArchiveInfo archiveInfo = fileQueue.peek();
                final long oldestArchiveDate = archiveInfo == null ? System.currentTimeMillis()
                        : getLastModTime(archiveInfo.toPath());

                // Otherwise, we're done. Return the last mod time of the oldest file in the container's archive.
                final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
                if (deleteCount > 0) {
                    LOG.info(
                            "Deleted {} files from archive for Container {}; oldest Archive Date is now {}; container cleanup took {} millis",
                            deleteCount, containerName, new Date(oldestArchiveDate), millis);
                } else {
                    LOG.debug(
                            "Deleted {} files from archive for Container {}; oldest Archive Date is now {}; container cleanup took {} millis",
                            deleteCount, containerName, new Date(oldestArchiveDate), millis);
                }

                return oldestArchiveDate;
            }
        } catch (final IOException ioe) {
            LOG.warn("Failed to delete {} from archive due to {}", toDelete, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }
    }

    // Go through each container and grab the archived data into a List
    archiveExpirationLog.debug("Searching for more archived data to expire");
    final StopWatch stopWatch = new StopWatch(true);
    for (int i = 0; i < SECTIONS_PER_CONTAINER; i++) {
        final Path sectionContainer = container.resolve(String.valueOf(i));
        final Path archive = sectionContainer.resolve("archive");
        if (!Files.exists(archive)) {
            continue;
        }

        try {
            final long timestampThreshold = removalTimeThreshold;
            Files.walkFileTree(archive, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs)
                        throws IOException {
                    if (attrs.isDirectory()) {
                        return FileVisitResult.CONTINUE;
                    }

                    final long lastModTime = getLastModTime(file);
                    if (lastModTime < timestampThreshold) {
                        try {
                            Files.deleteIfExists(file);
                            containerState.decrementArchiveCount();
                            LOG.debug(
                                    "Deleted archived ContentClaim with ID {} from Container {} because it was older than the configured max archival duration",
                                    file.toFile().getName(), containerName);
                        } catch (final IOException ioe) {
                            LOG.warn(
                                    "Failed to remove archived ContentClaim with ID {} from Container {} due to {}",
                                    file.toFile().getName(), containerName, ioe.toString());
                            if (LOG.isDebugEnabled()) {
                                LOG.warn("", ioe);
                            }
                        }
                    } else if (usableSpace < minRequiredSpace) {
                        notYetExceedingThreshold
                                .add(new ArchiveInfo(container, file, attrs.size(), lastModTime));
                    }

                    return FileVisitResult.CONTINUE;
                }
            });
        } catch (final IOException ioe) {
            LOG.warn("Failed to cleanup archived files in {} due to {}", archive, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }
    }
    final long deleteExpiredMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);

    // Sort the list according to last modified time
    Collections.sort(notYetExceedingThreshold, new Comparator<ArchiveInfo>() {
        @Override
        public int compare(final ArchiveInfo o1, final ArchiveInfo o2) {
            return Long.compare(o1.getLastModTime(), o2.getLastModTime());
        }
    });

    final long sortRemainingMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteExpiredMillis;

    // Delete the oldest data
    archiveExpirationLog.debug("Deleting data based on timestamp");
    final Iterator<ArchiveInfo> itr = notYetExceedingThreshold.iterator();
    int counter = 0;
    while (itr.hasNext()) {
        final ArchiveInfo archiveInfo = itr.next();

        try {
            final Path path = archiveInfo.toPath();
            Files.deleteIfExists(path);
            containerState.decrementArchiveCount();
            LOG.debug(
                    "Deleted archived ContentClaim with ID {} from Container {} because the archival size was exceeding the max configured size",
                    archiveInfo.getName(), containerName);

            // Check if we've freed enough space every 25 files that we destroy
            if (++counter % 25 == 0) {
                if (getContainerUsableSpace(containerName) > minRequiredSpace) { // check if we can stop now
                    LOG.debug("Finished cleaning up archive for Container {}", containerName);
                    break;
                }
            }
        } catch (final IOException ioe) {
            LOG.warn("Failed to delete {} from archive due to {}", archiveInfo, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }

        itr.remove();
    }

    final long deleteOldestMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - sortRemainingMillis
            - deleteExpiredMillis;

    long oldestContainerArchive;
    if (notYetExceedingThreshold.isEmpty()) {
        oldestContainerArchive = System.currentTimeMillis();
    } else {
        oldestContainerArchive = notYetExceedingThreshold.get(0).getLastModTime();
    }

    if (oldestContainerArchive < oldestArchiveDateFound) {
        oldestArchiveDateFound = oldestContainerArchive;
    }

    // Queue up the files in the order that they should be destroyed so that we don't have to scan the directories for a while.
    for (final ArchiveInfo toEnqueue : notYetExceedingThreshold.subList(0,
            Math.min(100000, notYetExceedingThreshold.size()))) {
        fileQueue.offer(toEnqueue);
    }

    final long cleanupMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteOldestMillis
            - sortRemainingMillis - deleteExpiredMillis;
    LOG.debug(
            "Oldest Archive Date for Container {} is {}; delete expired = {} ms, sort remaining = {} ms, delete oldest = {} ms, cleanup = {} ms",
            containerName, new Date(oldestContainerArchive), deleteExpiredMillis, sortRemainingMillis,
            deleteOldestMillis, cleanupMillis);
    return oldestContainerArchive;
}
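
The FileSystemRepository example above relies on a peek-then-poll pattern: it inspects the head of the queue with peek() and only removes it with poll() once it has decided the file should actually be deleted. Below is a stripped-down sketch of that pattern; the shouldDelete() predicate is a hypothetical stand-in for NiFi's size and age checks. Note that peeking and then polling only works cleanly with a single consumer, since another consumer could remove the head between the two calls.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class PeekThenPollExample {

    // Hypothetical predicate standing in for the size/age checks in the example above.
    private static boolean shouldDelete(String item) {
        return item.startsWith("expired-");
    }

    public static void main(String[] args) {
        BlockingQueue<String> queue = new LinkedBlockingQueue<>();
        queue.offer("expired-a");
        queue.offer("expired-b");
        queue.offer("fresh-c");

        String head;
        while ((head = queue.peek()) != null) {
            if (!shouldDelete(head)) {
                // The head does not qualify, so leave it on the queue and stop.
                break;
            }
            // Only now remove the head we just inspected.
            head = queue.poll();
            System.out.println("Deleted " + head);
        }
        System.out.println("Remaining head: " + queue.peek()); // prints "fresh-c"
    }
}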

From source file: org.opencastproject.videosegmenter.impl.VideoSegmenterServiceImpl.java

/**
 * Returns the segments for the movie accessible through the frame grabbing control.
 *
 * @param video
 *          the mpeg-7 video representation
 * @param dsh
 *          the data source handler
 * @return the list of segments
 * @throws IOException
 *           if accessing a frame fails
 * @throws VideoSegmenterException
 *           if segmentation of the video fails
 */
protected List<Segment> segment(Video video, FrameGrabber dsh) throws IOException, VideoSegmenterException {
    List<Segment> segments = new ArrayList<Segment>();

    int t = 1;
    int lastStableImageTime = 0;
    long startOfSegment = 0;
    int currentSceneStabilityCount = 1;
    boolean sceneChangeImminent = true;
    boolean luckyPunchRecovery = false;
    int segmentCount = 1;
    BufferedImage previousImage = null;
    BufferedImage lastStableImage = null;
    BlockingQueue<Buffer> bufferQueue = new ArrayBlockingQueue<Buffer>(stabilityThreshold + 1);
    long durationInSeconds = video.getMediaTime().getMediaDuration().getDurationInMilliseconds() / 1000;
    Segment contentSegment = video.getTemporalDecomposition().createSegment("segment-" + segmentCount);
    ImageComparator icomp = new ImageComparator(changesThreshold);

    // icomp.setStatistics(true);
    // String imagesPath = PathSupport.concat(new String[] {
    // System.getProperty("java.io.tmpdir"),
    // "videosegments",
    // video.getMediaLocator().getMediaURI().toString().replaceAll("\\W", "-")
    // });
    // icomp.saveImagesTo(new File(imagesPath));

    Buffer buf = dsh.getBuffer();
    while (t < durationInSeconds && buf != null && !buf.isEOM()) {
        BufferedImage bufferedImage = ImageUtils.createImage(buf);
        if (bufferedImage == null)
            throw new VideoSegmenterException("Unable to extract image at time " + t);

        logger.trace("Analyzing video at {} s", t);

        // Compare the new image with our previous sample
        boolean differsFromPreviousImage = icomp.isDifferent(previousImage, bufferedImage, t);

        // We found an image that is different compared to the previous one. Let's see if this image remains stable
        // for some time (STABILITY_THRESHOLD) so we can declare a new scene
        if (differsFromPreviousImage) {
            logger.debug("Found differing image at {} seconds", t);

            // If this is the result of a lucky punch (looking ahead STABILITY_THRESHOLD seconds), then we should
            // really start over and make sure we get the correct beginning of the new scene
            if (!sceneChangeImminent && t - lastStableImageTime > 1) {
                luckyPunchRecovery = true;
                previousImage = lastStableImage;
                bufferQueue.add(buf);
                t = lastStableImageTime;
            } else {
                lastStableImageTime = t - 1;
                lastStableImage = previousImage;
                previousImage = bufferedImage;
                currentSceneStabilityCount = 1;
                t++;
            }
            sceneChangeImminent = true;
        }

        // We are looking ahead and everything seems to be fine.
        else if (!sceneChangeImminent) {
            fillLookAheadBuffer(bufferQueue, buf, dsh);
            lastStableImageTime = t;
            t += stabilityThreshold;
            previousImage = bufferedImage;
            lastStableImage = bufferedImage;
        }

        // Seems to be the same image. If we have just recently detected a new scene, let's see if we are able to
        // confirm that this scene is stable (>= STABILITY_THRESHOLD)
        else if (currentSceneStabilityCount < stabilityThreshold) {
            currentSceneStabilityCount++;
            previousImage = bufferedImage;
            t++;
        }

        // Did we find a new scene?
        else if (currentSceneStabilityCount == stabilityThreshold) {
            lastStableImageTime = t;

            long endOfSegment = t - stabilityThreshold - 1;
            long durationms = (endOfSegment - startOfSegment) * 1000L;

            // Create a new segment if this wasn't the first one
            if (endOfSegment > stabilityThreshold) {
                contentSegment.setMediaTime(new MediaRelTimeImpl(startOfSegment * 1000L, durationms));
                contentSegment = video.getTemporalDecomposition().createSegment("segment-" + ++segmentCount);
                segments.add(contentSegment);
                startOfSegment = endOfSegment;
            }

            // After finding a new segment, likelihood of a stable image is good, let's take a look ahead. Since
            // a processor can't seek, we need to store the buffers in between, in case we need to come back.
            fillLookAheadBuffer(bufferQueue, buf, dsh);
            t += stabilityThreshold;
            previousImage = bufferedImage;
            lastStableImage = bufferedImage;
            currentSceneStabilityCount++;
            sceneChangeImminent = false;
            logger.info("Found new scene at {} s", startOfSegment);
        }

        // Did we find a new scene by looking ahead?
        else if (sceneChangeImminent) {
            // We found a scene change by looking ahead. Now we want to get to the exact position
            lastStableImageTime = t;
            previousImage = bufferedImage;
            lastStableImage = bufferedImage;
            currentSceneStabilityCount++;
            t++;
        }

        // Nothing special, business as usual
        else {
            // If things look stable, then let's look ahead as much as possible without losing information (which is
            // equal to looking ahead STABILITY_THRESHOLD seconds).
            lastStableImageTime = t;
            fillLookAheadBuffer(bufferQueue, buf, dsh);
            t += stabilityThreshold;
            lastStableImage = bufferedImage;
            previousImage = bufferedImage;
        }

        if (luckyPunchRecovery) {
            buf = bufferQueue.poll();
            luckyPunchRecovery = !bufferQueue.isEmpty();
        } else
            buf = dsh.getBuffer();
    }

    // Finish off the last segment
    long startOfSegmentms = startOfSegment * 1000L;
    long durationms = ((long) durationInSeconds - startOfSegment) * 1000;
    contentSegment.setMediaTime(new MediaRelTimeImpl(startOfSegmentms, durationms));
    segments.add(contentSegment);

    // Print summary
    if (icomp.hasStatistics()) {
        NumberFormat nf = NumberFormat.getNumberInstance();
        nf.setMaximumFractionDigits(2);
        logger.info("Image comparison finished with an average change of {}% in {} comparisons",
                nf.format(icomp.getAvgChange()), icomp.getComparisons());
    }

    // Cleanup
    if (icomp.getSavedImagesDirectory() != null) {
        FileUtils.deleteQuietly(icomp.getSavedImagesDirectory());
    }

    return segments;
}
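
In the segmenter above, bufferQueue acts as a look-ahead cache: frames read while scanning ahead are stashed in the queue, and when the analysis has to rewind to the exact scene boundary (luckyPunchRecovery), they are replayed by draining the queue with poll() until it is empty. The following is a self-contained sketch of that stash-and-replay idea, using plain integers in place of media Buffers; names and capacities are illustrative.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class LookAheadReplayExample {
    public static void main(String[] args) {
        // Bounded look-ahead buffer, analogous to the stabilityThreshold + 1 capacity above.
        BlockingQueue<Integer> lookAhead = new ArrayBlockingQueue<>(4);

        // Stash a few "frames" while looking ahead.
        for (int frame = 10; frame <= 12; frame++) {
            lookAhead.offer(frame);
        }

        // Replay: drain the stashed frames with poll() before reading from the live source again.
        Integer frame;
        while ((frame = lookAhead.poll()) != null) {
            System.out.println("Replaying frame " + frame);
        }
        System.out.println("Look-ahead buffer empty, back to the live source");
    }
}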