Example usage for java.lang Long compare

List of usage examples for java.lang Long compare

Introduction

On this page you can find example usage for java.lang.Long.compare.

Prototype

public static int compare(long x, long y) 

Document

Compares two long values numerically. Returns a negative value if x < y, zero if x == y, and a positive value if x > y; the result is the same as Long.valueOf(x).compareTo(Long.valueOf(y)).
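
A minimal standalone sketch (the class name LongCompareDemo and the variables are illustrative) showing the sign convention, why Long.compare is safer than the subtraction idiom (whose result can overflow and flip sign), and the typical comparator use that the project examples below build on:

public class LongCompareDemo {
    public static void main(String[] args) {
        // Sign convention: negative, zero, or positive.
        System.out.println(Long.compare(1L, 2L)); // negative
        System.out.println(Long.compare(5L, 5L)); // 0
        System.out.println(Long.compare(9L, 3L)); // positive

        // The subtraction idiom "x - y" can overflow and report the wrong sign.
        long x = Long.MIN_VALUE;
        long y = Long.MAX_VALUE;
        System.out.println(Long.compare(x, y)); // negative, since x < y
        System.out.println(x - y);              // 1: overflow yields the wrong sign

        // Typical use: sorting by a long key, as in the project examples below.
        java.util.List<Long> ids = java.util.Arrays.asList(42L, 7L, 19L);
        ids.sort(Long::compare);
        System.out.println(ids); // [7, 19, 42]
    }
}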

Usage

From source file:org.apache.carbondata.core.statusmanager.SegmentUpdateStatusManager.java

/**
 * Returns the delete delta files of the given block that fall outside the block's
 * delta timestamp range, or the complete list of its delta files when requested.
 *
 * @param block            segment update details of the block
 * @param needCompleteList whether to return the complete list of delta files for the block
 * @param allSegmentFiles  all files of the segment to scan
 * @param isAbortedFile    whether to return only files newer than the delta end timestamp
 * @return the matching delete delta files
 */
public CarbonFile[] getDeleteDeltaInvalidFilesList(final SegmentUpdateDetails block,
        final boolean needCompleteList, CarbonFile[] allSegmentFiles, boolean isAbortedFile) {

    final long deltaStartTimestamp = getStartTimeOfDeltaFile(CarbonCommonConstants.DELETE_DELTA_FILE_EXT,
            block);

    final long deltaEndTimestamp = getEndTimeOfDeltaFile(CarbonCommonConstants.DELETE_DELTA_FILE_EXT, block);

    List<CarbonFile> files = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);

    for (CarbonFile eachFile : allSegmentFiles) {
        String fileName = eachFile.getName();
        if (fileName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)) {
            String blkName = CarbonTablePath.DataFileUtil.getBlockNameFromDeleteDeltaFile(fileName);

            // complete list of delta files of that block is returned.
            if (needCompleteList && block.getBlockName().equalsIgnoreCase(blkName)) {
                files.add(eachFile);
            }

            // only the invalid delete delta files will be returned.
            long timestamp = CarbonUpdateUtil
                    .getTimeStampAsLong(CarbonTablePath.DataFileUtil.getTimeStampFromDeleteDeltaFile(fileName));

            if (block.getBlockName().equalsIgnoreCase(blkName)) {

                if (isAbortedFile) {
                    if (Long.compare(timestamp, deltaEndTimestamp) > 0) {
                        files.add(eachFile);
                    }
                } else if (Long.compare(timestamp, deltaStartTimestamp) < 0
                        || Long.compare(timestamp, deltaEndTimestamp) > 0) {
                    files.add(eachFile);
                }
            }
        }
    }

    return files.toArray(new CarbonFile[files.size()]);
}

From source file:org.alfresco.repo.jscript.People.java

private List<NodeRef> getSortedPeopleObjects(List<NodeRef> peopleRefs, final String sortBy, Boolean sortAsc) {
    if (sortBy == null) {
        return peopleRefs;
    }

    // make a copy of peopleRefs because it can be an unmodifiable list.
    List<NodeRef> sortedPeopleRefs = new ArrayList<NodeRef>(peopleRefs);
    final Collator col = Collator.getInstance(I18NUtil.getLocale());
    final NodeService nodeService = services.getNodeService();
    final int orderMultiplicator = ((sortAsc == null) || sortAsc) ? 1 : -1;
    Collections.sort(sortedPeopleRefs, new Comparator<NodeRef>() {
        @Override
        public int compare(NodeRef n1, NodeRef n2) {
            Serializable p1 = getProperty(n1);
            Serializable p2 = getProperty(n2);

            if ((p1 instanceof Long) && (p2 instanceof Long)) {
                return Long.compare((Long) p1, (Long) p2) * orderMultiplicator;
            }

            return col.compare(p1.toString(), p2.toString()) * orderMultiplicator;
        }

        public Serializable getProperty(NodeRef nodeRef) {
            Serializable result;

            if ("fullName".equalsIgnoreCase(sortBy)) {
                String firstName = (String) nodeService.getProperty(nodeRef, ContentModel.PROP_FIRSTNAME);
                String lastName = (String) nodeService.getProperty(nodeRef, ContentModel.PROP_LASTNAME);
                String fullName = firstName;
                if (lastName != null && lastName.length() > 0) {
                    fullName = fullName + " " + lastName;
                }

                result = fullName;
            } else if ("jobtitle".equalsIgnoreCase(sortBy)) {
                result = nodeService.getProperty(nodeRef, ContentModel.PROP_JOBTITLE);
            } else if ("email".equalsIgnoreCase(sortBy)) {
                result = nodeService.getProperty(nodeRef, ContentModel.PROP_EMAIL);
            } else if ("usage".equalsIgnoreCase(sortBy)) {
                result = nodeService.getProperty(nodeRef, ContentModel.PROP_SIZE_CURRENT);
            } else if ("quota".equalsIgnoreCase(sortBy)) {
                result = nodeService.getProperty(nodeRef, ContentModel.PROP_SIZE_QUOTA);
            } else {
                // Default
                result = nodeService.getProperty(nodeRef, ContentModel.PROP_USERNAME);
            }

            if (result == null) {
                result = "";
            }

            return result;
        }

    });

    return sortedPeopleRefs;
}

From source file:org.apache.flink.streaming.runtime.operators.windowing.WindowOperator.java

private void restoreFromLegacyAlignedWindowOperator(DataInputViewStreamWrapper in) throws IOException {
    Preconditions.checkArgument(legacyWindowOperatorType != LegacyWindowOperatorType.NONE);

    final long nextEvaluationTime = in.readLong();
    final long nextSlideTime = in.readLong();

    validateMagicNumber(BEGIN_OF_STATE_MAGIC_NUMBER, in.readInt());

    restoredFromLegacyAlignedOpRecords = new PriorityQueue<>(42, new Comparator<StreamRecord<IN>>() {
        @Override
        public int compare(StreamRecord<IN> o1, StreamRecord<IN> o2) {
            return Long.compare(o1.getTimestamp(), o2.getTimestamp());
        }
    });

    switch (legacyWindowOperatorType) {
    case FAST_ACCUMULATING:
        restoreElementsFromLegacyAccumulatingAlignedWindowOperator(in, nextSlideTime);
        break;
    case FAST_AGGREGATING:
        restoreElementsFromLegacyAggregatingAlignedWindowOperator(in, nextSlideTime);
        break;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("{} (taskIdx={}) restored {} events from legacy {}.", getClass().getSimpleName(),
                getRuntimeContext().getIndexOfThisSubtask(), restoredFromLegacyAlignedOpRecords.size(),
                legacyWindowOperatorType);
    }
}

From source file:org.apache.nifi.provenance.MiNiFiPersistentProvenanceRepository.java

/**
 * Purges old events from the repository
 *
 * @throws IOException if unable to purge old events due to an I/O problem
 */
synchronized void purgeOldEvents() throws IOException {
    while (!recoveryFinished.get()) {
        try {
            Thread.sleep(100L);
        } catch (final InterruptedException ie) {
            // ignore and keep waiting until recovery finishes
        }
    }

    final List<File> toPurge = new ArrayList<>();
    final long timeCutoff = System.currentTimeMillis() - configuration.getMaxRecordLife(TimeUnit.MILLISECONDS);

    final List<File> sortedByBasename = getLogFiles();
    long bytesUsed = getSize(sortedByBasename, timeCutoff);

    for (final Path path : idToPathMap.get().values()) {
        final File file = path.toFile();
        final long lastModified = file.lastModified();
        if (lastModified > 0L && lastModified < timeCutoff) {
            toPurge.add(file);
        }
    }

    // This comparator sorts the data based on the "basename" of the files. I.e., the numeric portion.
    // We do this because the numeric portion represents the ID of the first event in the log file.
    // As a result, we are sorting based on time, since the ID is monotonically increasing. By doing this,
    // we are able to avoid hitting the disk continually to check timestamps.
    final Comparator<File> sortByBasenameComparator = new Comparator<File>() {
        @Override
        public int compare(final File o1, final File o2) {
            final String baseName1 = StringUtils.substringBefore(o1.getName(), ".");
            final String baseName2 = StringUtils.substringBefore(o2.getName(), ".");

            Long id1 = null;
            Long id2 = null;
            try {
                id1 = Long.parseLong(baseName1);
            } catch (final NumberFormatException nfe) {
                id1 = null;
            }

            try {
                id2 = Long.parseLong(baseName2);
            } catch (final NumberFormatException nfe) {
                id2 = null;
            }

            if (id1 == null && id2 == null) {
                return 0;
            }
            if (id1 == null) {
                return 1;
            }
            if (id2 == null) {
                return -1;
            }

            return Long.compare(id1, id2);
        }
    };

    // If we have too much data (at least 90% of our max capacity), start aging it off
    if (bytesUsed > configuration.getMaxStorageCapacity() * 0.9) {
        Collections.sort(sortedByBasename, sortByBasenameComparator);

        for (final File file : sortedByBasename) {
            toPurge.add(file);
            bytesUsed -= file.length();
            if (bytesUsed < configuration.getMaxStorageCapacity()) {
                // we've shrunk the repo size down enough to stop
                break;
            }
        }
    }

    // Sort all of the files that we want to purge such that the oldest events are aged off first
    Collections.sort(toPurge, sortByBasenameComparator);
    logger.debug("Purging old event files: {}", toPurge);

    // Remove any duplicates that we may have.
    final Set<File> uniqueFilesToPurge = new LinkedHashSet<>(toPurge);

    // Age off the data.
    final Set<String> removed = new LinkedHashSet<>();
    for (File file : uniqueFilesToPurge) {
        final String baseName = StringUtils.substringBefore(file.getName(), ".");
        ExpirationAction currentAction = null;
        try {
            for (final ExpirationAction action : expirationActions) {
                currentAction = action;
                if (!action.hasBeenPerformed(file)) {
                    final File fileBeforeAction = file;
                    final StopWatch stopWatch = new StopWatch(true);
                    file = action.execute(file);
                    stopWatch.stop();
                    logger.info("Successfully performed Expiration Action {} on Provenance Event file {} in {}",
                            action, fileBeforeAction, stopWatch.getDuration());
                }
            }

            removed.add(baseName);
        } catch (final FileNotFoundException fnf) {
            logger.warn(
                    "Failed to perform Expiration Action {} on Provenance Event file {} because the file no longer exists; will not "
                            + "perform additional Expiration Actions on this file",
                    currentAction, file);
            removed.add(baseName);
        } catch (final Throwable t) {
            logger.warn(
                    "Failed to perform Expiration Action {} on Provenance Event file {} due to {}; will not perform additional "
                            + "Expiration Actions on this file at this time",
                    currentAction, file, t.toString());
            logger.warn("", t);
            eventReporter.reportEvent(Severity.WARNING, EVENT_CATEGORY,
                    "Failed to perform Expiration Action " + currentAction + " on Provenance Event file " + file
                            + " due to " + t.toString() + "; will not perform additional Expiration Actions "
                            + "on this file at this time");
        }
    }

    // Update the ID-to-Path map so it no longer includes the removed files.
    // We cannot obtain the write lock here because there may be a need for the lock in the rollover method,
    // if we have 'backpressure applied'. This would result in a deadlock because the rollover method would be
    // waiting for purgeOldEvents, and purgeOldEvents would be waiting for the write lock held by rollover.
    boolean updated = false;
    while (!updated) {
        final SortedMap<Long, Path> existingPathMap = idToPathMap.get();
        final SortedMap<Long, Path> newPathMap = new TreeMap<>(new PathMapComparator());
        newPathMap.putAll(existingPathMap);

        final Iterator<Map.Entry<Long, Path>> itr = newPathMap.entrySet().iterator();
        while (itr.hasNext()) {
            final Map.Entry<Long, Path> entry = itr.next();
            final String filename = entry.getValue().toFile().getName();
            final String baseName = StringUtils.substringBefore(filename, ".");

            if (removed.contains(baseName)) {
                itr.remove();
            }
        }

        updated = idToPathMap.compareAndSet(existingPathMap, newPathMap);
        logger.debug("After expiration, path map: {}", newPathMap);
    }
}

From source file:org.sleuthkit.autopsy.timeline.events.db.EventDB.java

/**
 * //TODO: update javadoc //TODO: split this into helper methods
 *
 * get a list of {@link AggregateEvent}s.
 *
 * General algorithm is as follows:
 *
 * - get all aggregate events, via one db query.
 * - sort them into a map from (type, description)-> aggevent
 * - for each key in map, merge the events and accumulate them in a list
 * to return
 *
 *
 * @param timeRange the Interval within which all returned aggregate
 *                  events will be
 * @param filter    only events that pass the filter will be included in
 *                  the aggregate events returned
 * @param zoomLevel only events of this level will be included
 * @param lod       description level of detail to use when grouping events
 *
 *
 * @return a list of aggregate events within the given timerange, that pass
 *         the supplied filter, aggregated according to the given event type and
 *         description zoom levels
 */
private List<AggregateEvent> getAggregatedEvents(Interval timeRange, Filter filter,
        EventTypeZoomLevel zoomLevel, DescriptionLOD lod) {
    String descriptionColumn = getDescriptionColumn(lod);
    final boolean useSubTypes = (zoomLevel.equals(EventTypeZoomLevel.SUB_TYPE));

    //get some info about the time range requested
    RangeDivisionInfo rangeInfo = RangeDivisionInfo.getRangeDivisionInfo(timeRange);
    //use 'rounded out' range
    long start = timeRange.getStartMillis() / 1000;
    long end = timeRange.getEndMillis() / 1000;
    if (start == end) {
        end++;
    }

    //get a sqlite strftime format string
    String strfTimeFormat = getStrfTimeFormat(rangeInfo.getPeriodSize());

    //effectively map from type to (map from description to events)
    Map<EventType, SetMultimap<String, AggregateEvent>> typeMap = new HashMap<>();

    //get all aggregate events in this time unit
    dbReadLock();
    String query = "select strftime('" + strfTimeFormat + "',time , 'unixepoch'"
            + (TimeLineController.getTimeZone().get().equals(TimeZone.getDefault()) ? ", 'localtime'" : "")
            + ") as interval,  group_concat(event_id) as event_ids, Min(time), Max(time),  " + descriptionColumn
            + ", " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN) // NON-NLS
            + " from events where time >= " + start + " and time < " + end + " and " + getSQLWhere(filter) // NON-NLS
            + " group by interval, " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN) + " , "
            + descriptionColumn // NON-NLS
            + " order by Min(time)"; // NON-NLS
    //System.out.println(query);
    ResultSet rs = null;
    try (Statement stmt = con.createStatement()) { // scoop up requested events in groups organized by interval, type, and description

        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        rs = stmt.executeQuery(query);
        stopwatch.stop();
        //System.out.println(stopwatch.elapsedMillis() / 1000.0 + " seconds");
        while (rs.next()) {
            EventType type = useSubTypes ? RootEventType.allTypes.get(rs.getInt(SUB_TYPE_COLUMN))
                    : BaseTypes.values()[rs.getInt(BASE_TYPE_COLUMN)];

            AggregateEvent aggregateEvent = new AggregateEvent(
                    new Interval(rs.getLong("Min(time)") * 1000, rs.getLong("Max(time)") * 1000,
                            TimeLineController.getJodaTimeZone()), // NON-NLS
                    type, Arrays.asList(rs.getString("event_ids").split(",")), // NON-NLS
                    rs.getString(descriptionColumn), lod);

            //put events in map from type/description -> event
            SetMultimap<String, AggregateEvent> descrMap = typeMap.get(type);
            if (descrMap == null) {
                descrMap = HashMultimap.<String, AggregateEvent>create();
                typeMap.put(type, descrMap);
            }
            descrMap.put(aggregateEvent.getDescription(), aggregateEvent);
        }

    } catch (SQLException ex) {
        Exceptions.printStackTrace(ex);
    } finally {
        try {
            if (rs != null) {
                rs.close();
            }
        } catch (SQLException ex) {
            Exceptions.printStackTrace(ex);
        }
        dbReadUnlock();
    }

    //result list to return
    ArrayList<AggregateEvent> aggEvents = new ArrayList<>();

    //save this for use when comparing gap size
    Period timeUnitLength = rangeInfo.getPeriodSize().getPeriod();

    //For each (type, description) key, merge agg events
    for (SetMultimap<String, AggregateEvent> descrMap : typeMap.values()) {
        for (String descr : descrMap.keySet()) {
            //run through the sorted events, merging together adjacent events
            Iterator<AggregateEvent> iterator = descrMap.get(descr).stream()
                    .sorted((AggregateEvent o1, AggregateEvent o2) -> Long
                            .compare(o1.getSpan().getStartMillis(), o2.getSpan().getStartMillis()))
                    .iterator();
            AggregateEvent current = iterator.next();
            while (iterator.hasNext()) {
                AggregateEvent next = iterator.next();
                Interval gap = current.getSpan().gap(next.getSpan());

                //if they overlap or the gap is less than one quarter of timeUnitLength
                //TODO: 1/4 factor is arbitrary. review! -jm
                if (gap == null || gap.toDuration()
                        .getMillis() <= timeUnitLength.toDurationFrom(gap.getStart()).getMillis() / 4) {
                    //merge them
                    current = AggregateEvent.merge(current, next);
                } else {
                    //done merging into current, set next as new current
                    aggEvents.add(current);
                    current = next;
                }
            }
            aggEvents.add(current);
        }
    }

    //at this point we should have a list of aggregate events.
    //one per type/description spanning consecutive time units as determined in rangeInfo
    return aggEvents;
}

From source file:org.apache.nifi.web.api.dto.DtoFactory.java

public ProcessGroupStatusDTO createProcessGroupStatusDto(final BulletinRepository bulletinRepository,
        final ProcessGroupStatus processGroupStatus) {

    final ProcessGroupStatusDTO processGroupStatusDto = new ProcessGroupStatusDTO();
    processGroupStatusDto.setId(processGroupStatus.getId());
    processGroupStatusDto.setName(processGroupStatus.getName());
    processGroupStatusDto.setStatsLastRefreshed(new Date(processGroupStatus.getCreationTimestamp()));
    processGroupStatusDto.setRead(formatDataSize(processGroupStatus.getBytesRead()));
    processGroupStatusDto.setWritten(formatDataSize(processGroupStatus.getBytesWritten()));
    processGroupStatusDto.setInput(formatCount(processGroupStatus.getInputCount()) + " / "
            + formatDataSize(processGroupStatus.getInputContentSize()));
    processGroupStatusDto.setOutput(formatCount(processGroupStatus.getOutputCount()) + " / "
            + formatDataSize(processGroupStatus.getOutputContentSize()));
    processGroupStatusDto.setTransferred(formatCount(processGroupStatus.getFlowFilesTransferred()) + " / "
            + formatDataSize(processGroupStatus.getBytesTransferred()));
    processGroupStatusDto.setSent(formatCount(processGroupStatus.getFlowFilesSent()) + " / "
            + formatDataSize(processGroupStatus.getBytesSent()));
    processGroupStatusDto.setReceived(formatCount(processGroupStatus.getFlowFilesReceived()) + " / "
            + formatDataSize(processGroupStatus.getBytesReceived()));
    processGroupStatusDto.setActiveThreadCount(processGroupStatus.getActiveThreadCount());

    final String queuedCount = FormatUtils.formatCount(processGroupStatus.getQueuedCount());
    final String queuedSize = FormatUtils.formatDataSize(processGroupStatus.getQueuedContentSize());
    processGroupStatusDto.setQueuedCount(queuedCount);
    processGroupStatusDto.setQueuedSize(queuedSize);
    processGroupStatusDto.setQueued(queuedCount + " / " + queuedSize);

    final Map<String, StatusDTO> componentStatusDtoMap = new HashMap<>();

    // processor status
    final Collection<ProcessorStatusDTO> processorStatDtoCollection = new ArrayList<>();
    processGroupStatusDto.setProcessorStatus(processorStatDtoCollection);
    final Collection<ProcessorStatus> processorStatusCollection = processGroupStatus.getProcessorStatus();
    if (processorStatusCollection != null) {
        for (final ProcessorStatus processorStatus : processorStatusCollection) {
            final ProcessorStatusDTO processorStatusDto = createProcessorStatusDto(processorStatus);
            processorStatDtoCollection.add(processorStatusDto);
            componentStatusDtoMap.put(processorStatusDto.getId(), processorStatusDto);
        }
    }

    // connection status
    final Collection<ConnectionStatusDTO> connectionStatusDtoCollection = new ArrayList<>();
    processGroupStatusDto.setConnectionStatus(connectionStatusDtoCollection);
    final Collection<ConnectionStatus> connectionStatusCollection = processGroupStatus.getConnectionStatus();
    if (connectionStatusCollection != null) {
        for (final ConnectionStatus connectionStatus : connectionStatusCollection) {
            final ConnectionStatusDTO connectionStatusDto = createConnectionStatusDto(connectionStatus);
            connectionStatusDtoCollection.add(connectionStatusDto);
        }
    }

    // local child process groups
    final Collection<ProcessGroupStatusDTO> childProcessGroupStatusDtoCollection = new ArrayList<>();
    processGroupStatusDto.setProcessGroupStatus(childProcessGroupStatusDtoCollection);
    final Collection<ProcessGroupStatus> childProcessGroupStatusCollection = processGroupStatus
            .getProcessGroupStatus();
    if (childProcessGroupStatusCollection != null) {
        for (final ProcessGroupStatus childProcessGroupStatus : childProcessGroupStatusCollection) {
            final ProcessGroupStatusDTO childProcessGroupStatusDto = createProcessGroupStatusDto(
                    bulletinRepository, childProcessGroupStatus);
            childProcessGroupStatusDtoCollection.add(childProcessGroupStatusDto);
        }
    }

    // remote child process groups
    final Collection<RemoteProcessGroupStatusDTO> childRemoteProcessGroupStatusDtoCollection = new ArrayList<>();
    processGroupStatusDto.setRemoteProcessGroupStatus(childRemoteProcessGroupStatusDtoCollection);
    final Collection<RemoteProcessGroupStatus> childRemoteProcessGroupStatusCollection = processGroupStatus
            .getRemoteProcessGroupStatus();
    if (childRemoteProcessGroupStatusCollection != null) {
        for (final RemoteProcessGroupStatus childRemoteProcessGroupStatus : childRemoteProcessGroupStatusCollection) {
            final RemoteProcessGroupStatusDTO childRemoteProcessGroupStatusDto = createRemoteProcessGroupStatusDto(
                    childRemoteProcessGroupStatus);
            childRemoteProcessGroupStatusDtoCollection.add(childRemoteProcessGroupStatusDto);
            componentStatusDtoMap.put(childRemoteProcessGroupStatusDto.getId(),
                    childRemoteProcessGroupStatusDto);
        }
    }

    // input ports
    final Collection<PortStatusDTO> inputPortStatusDtoCollection = new ArrayList<>();
    processGroupStatusDto.setInputPortStatus(inputPortStatusDtoCollection);
    final Collection<PortStatus> inputPortStatusCollection = processGroupStatus.getInputPortStatus();
    if (inputPortStatusCollection != null) {
        for (final PortStatus portStatus : inputPortStatusCollection) {
            final PortStatusDTO portStatusDto = createPortStatusDto(portStatus);
            inputPortStatusDtoCollection.add(portStatusDto);
            componentStatusDtoMap.put(portStatusDto.getId(), portStatusDto);
        }
    }

    // output ports
    final Collection<PortStatusDTO> outputPortStatusDtoCollection = new ArrayList<>();
    processGroupStatusDto.setOutputPortStatus(outputPortStatusDtoCollection);
    final Collection<PortStatus> outputPortStatusCollection = processGroupStatus.getOutputPortStatus();
    if (outputPortStatusCollection != null) {
        for (final PortStatus portStatus : outputPortStatusCollection) {
            final PortStatusDTO portStatusDto = createPortStatusDto(portStatus);
            outputPortStatusDtoCollection.add(portStatusDto);
            componentStatusDtoMap.put(portStatusDto.getId(), portStatusDto);
        }
    }

    // get the bulletins for this group and associate with the specific child component
    if (bulletinRepository != null) {
        if (processGroupStatusDto.getBulletins() == null) {
            processGroupStatusDto.setBulletins(new ArrayList<BulletinDTO>());
        }

        // locate bulletins for this process group
        final List<Bulletin> results = bulletinRepository
                .findBulletinsForGroupBySource(processGroupStatus.getId(), MAX_BULLETINS_PER_COMPONENT);
        for (final Bulletin bulletin : results) {
            final StatusDTO status = componentStatusDtoMap.get(bulletin.getSourceId());

            // ensure this connectable is still in the flow
            if (status != null) {
                if (status.getBulletins() == null) {
                    status.setBulletins(new ArrayList<BulletinDTO>());
                }

                // convert the result into a dto
                final BulletinDTO bulletinDto = createBulletinDto(bulletin);
                status.getBulletins().add(bulletinDto);

                // create a copy for the parent group
                final BulletinDTO copy = copy(bulletinDto);
                copy.setGroupId(StringUtils.EMPTY);
                copy.setSourceId(processGroupStatus.getId());
                copy.setSourceName(processGroupStatus.getName());
                processGroupStatusDto.getBulletins().add(copy);
            }
        }

        // copy over descendant bulletins
        for (final ProcessGroupStatusDTO childProcessGroupStatusDto : processGroupStatusDto
                .getProcessGroupStatus()) {
            if (childProcessGroupStatusDto.getBulletins() != null) {
                for (final BulletinDTO descendantBulletinDto : childProcessGroupStatusDto.getBulletins()) {
                    // create a copy for the parent group
                    final BulletinDTO copy = copy(descendantBulletinDto);
                    copy.setGroupId(StringUtils.EMPTY);
                    copy.setSourceId(processGroupStatus.getId());
                    copy.setSourceName(processGroupStatus.getName());
                    processGroupStatusDto.getBulletins().add(copy);
                }
            }
        }

        // sort the bulletins
        Collections.sort(processGroupStatusDto.getBulletins(), new Comparator<BulletinDTO>() {
            @Override
            public int compare(BulletinDTO o1, BulletinDTO o2) {
                if (o1 == null && o2 == null) {
                    return 0;
                }
                if (o1 == null) {
                    return 1;
                }
                if (o2 == null) {
                    return -1;
                }

                return -Long.compare(o1.getId(), o2.getId());
            }
        });

        // prune the response to only include the max number of bulletins
        if (processGroupStatusDto.getBulletins().size() > MAX_BULLETINS_PER_COMPONENT) {
            processGroupStatusDto
                    .setBulletins(processGroupStatusDto.getBulletins().subList(0, MAX_BULLETINS_PER_COMPONENT));
        }
    }

    return processGroupStatusDto;
}

From source file:org.apache.nifi.processors.standard.TailFile.java

/**
 * Returns a list of all Files that match the following criteria:
 *
 * <ul>
 * <li>Filename matches the Rolling Filename Pattern</li>
 * <li>Filename does not match the actual file being tailed</li>
 * <li>The Last Modified Time on the file is equal to or later than the
 * given minimum timestamp</li>
 * </ul>
 *
 * <p>
 * The List that is returned will be ordered by file timestamp, providing
 * the oldest file first.
 * </p>
 *
 * @param context the ProcessContext to use in order to determine Processor
 * configuration
 * @param minTimestamp any file with a Last Modified Time before this
 * timestamp will not be returned
 * @return a list of all Files that have rolled over
 * @throws IOException if unable to perform the listing of files
 */
private List<File> getRolledOffFiles(final ProcessContext context, final long minTimestamp,
        final String tailFilePath) throws IOException {
    final File tailFile = new File(tailFilePath);
    File directory = tailFile.getParentFile();
    if (directory == null) {
        directory = new File(".");
    }

    String rollingPattern = context.getProperty(ROLLING_FILENAME_PATTERN).getValue();
    if (rollingPattern == null) {
        return Collections.emptyList();
    } else {
        rollingPattern = rollingPattern.replace("${filename}",
                StringUtils.substringBeforeLast(tailFile.getName(), "."));
    }

    final List<File> rolledOffFiles = new ArrayList<>();
    try (final DirectoryStream<Path> dirStream = Files.newDirectoryStream(directory.toPath(), rollingPattern)) {
        for (final Path path : dirStream) {
            final File file = path.toFile();
            final long lastMod = file.lastModified();

            if (lastMod < minTimestamp) {
                getLogger().debug(
                        "Found rolled off file {} but its last modified timestamp is before the cutoff (Last Mod = {}, Cutoff = {}) so will not consume it",
                        new Object[] { file, lastMod, minTimestamp });

                continue;
            } else if (file.equals(tailFile)) {
                continue;
            }

            rolledOffFiles.add(file);
        }
    }

    // Sort files based on last modified timestamp. If the timestamps are equal, use the filename as a secondary
    // sort, as rolled-over files often follow a naming scheme that sorts lexicographically in the same order as
    // the timestamp, such as yyyy-MM-dd-HH-mm-ss.
    Collections.sort(rolledOffFiles, new Comparator<File>() {
        @Override
        public int compare(final File o1, final File o2) {
            final int lastModifiedComp = Long.compare(o1.lastModified(), o2.lastModified());
            if (lastModifiedComp != 0) {
                return lastModifiedComp;
            }

            return o1.getName().compareTo(o2.getName());
        }
    });

    return rolledOffFiles;
}

From source file:org.apache.sysml.hops.rewrite.HopRewriteUtils.java

/**
 * Compares the size of outputs from hop1 and hop2, in terms of number
 * of matrix cells.
 * 
 * @param hop1 high-level operator 1
 * @param hop2 high-level operator 2
 * @return 0 if sizes are equal, &lt;0 for hop1&lt;hop2, &gt;0 for hop1&gt;hop2.
 */
public static int compareSize(Hop hop1, Hop hop2) {
    long size1 = hop1.getDim1() * hop1.getDim2();
    long size2 = hop2.getDim1() * hop2.getDim2();
    return Long.compare(size1, size2);
}

From source file:org.apache.nifi.controller.repository.FileSystemRepository.java

private long destroyExpiredArchives(final String containerName, final Path container) throws IOException {
    archiveExpirationLog.debug("Destroying Expired Archives for Container {}", containerName);
    final List<ArchiveInfo> notYetExceedingThreshold = new ArrayList<>();
    long removalTimeThreshold = System.currentTimeMillis() - maxArchiveMillis;
    long oldestArchiveDateFound = System.currentTimeMillis();

    // determine how much space we must have in order to stop deleting old data
    final Long minRequiredSpace = minUsableContainerBytesForArchive.get(containerName);
    if (minRequiredSpace == null) {
        archiveExpirationLog
                .debug("Could not determine minimum required space so will not destroy any archived data");
        return -1L;
    }

    final long usableSpace = getContainerUsableSpace(containerName);
    final ContainerState containerState = containerStateMap.get(containerName);

    // First, delete files from our queue
    final long startNanos = System.nanoTime();
    final long toFree = minRequiredSpace - usableSpace;
    final BlockingQueue<ArchiveInfo> fileQueue = archivedFiles.get(containerName);
    if (archiveExpirationLog.isDebugEnabled()) {
        if (toFree < 0) {
            archiveExpirationLog.debug(
                    "Currently {} bytes free for Container {}; requirement is {} byte free, so no need to free space until an additional {} bytes are used",
                    usableSpace, containerName, minRequiredSpace, Math.abs(toFree));
        } else {
            archiveExpirationLog.debug(
                    "Currently {} bytes free for Container {}; requirement is {} byte free, so need to free {} bytes",
                    usableSpace, containerName, minRequiredSpace, toFree);
        }
    }

    ArchiveInfo toDelete;
    int deleteCount = 0;
    long freed = 0L;
    while ((toDelete = fileQueue.peek()) != null) {
        try {
            final long fileSize = toDelete.getSize();

            removalTimeThreshold = System.currentTimeMillis() - maxArchiveMillis;

            // we use fileQueue.peek above instead of fileQueue.poll() because we don't always want to
            // remove the head of the queue. Instead, we want to remove it only if we plan to delete it.
            // In order to accomplish this, we just peek at the head and check if it should be deleted.
            // If so, then we call poll() to remove it
            if (freed < toFree || getLastModTime(toDelete.toPath()) < removalTimeThreshold) {
                toDelete = fileQueue.poll(); // remove the head of the queue, which is already stored in 'toDelete'
                Files.deleteIfExists(toDelete.toPath());
                containerState.decrementArchiveCount();
                LOG.debug(
                        "Deleted archived ContentClaim with ID {} from Container {} because the archival size was exceeding the max configured size",
                        toDelete.getName(), containerName);
                freed += fileSize;
                deleteCount++;
            }

            // If we'd freed up enough space, we're done... unless the next file needs to be destroyed based on time.
            if (freed >= toFree) {
                // If the last mod time indicates that it should be removed, just continue loop.
                if (deleteBasedOnTimestamp(fileQueue, removalTimeThreshold)) {
                    archiveExpirationLog.debug(
                            "Freed enough space ({} bytes freed, needed to free {} bytes) but will continue to expire data based on timestamp",
                            freed, toFree);
                    continue;
                }

                archiveExpirationLog.debug(
                        "Freed enough space ({} bytes freed, needed to free {} bytes). Finished expiring data",
                        freed, toFree);

                final ArchiveInfo archiveInfo = fileQueue.peek();
                final long oldestArchiveDate = archiveInfo == null ? System.currentTimeMillis()
                        : getLastModTime(archiveInfo.toPath());

                // Otherwise, we're done. Return the last mod time of the oldest file in the container's archive.
                final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
                if (deleteCount > 0) {
                    LOG.info(
                            "Deleted {} files from archive for Container {}; oldest Archive Date is now {}; container cleanup took {} millis",
                            deleteCount, containerName, new Date(oldestArchiveDate), millis);
                } else {
                    LOG.debug(
                            "Deleted {} files from archive for Container {}; oldest Archive Date is now {}; container cleanup took {} millis",
                            deleteCount, containerName, new Date(oldestArchiveDate), millis);
                }

                return oldestArchiveDate;
            }
        } catch (final IOException ioe) {
            LOG.warn("Failed to delete {} from archive due to {}", toDelete, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }
    }

    // Go through each container and grab the archived data into a List
    archiveExpirationLog.debug("Searching for more archived data to expire");
    final StopWatch stopWatch = new StopWatch(true);
    for (int i = 0; i < SECTIONS_PER_CONTAINER; i++) {
        final Path sectionContainer = container.resolve(String.valueOf(i));
        final Path archive = sectionContainer.resolve("archive");
        if (!Files.exists(archive)) {
            continue;
        }

        try {
            final long timestampThreshold = removalTimeThreshold;
            Files.walkFileTree(archive, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs)
                        throws IOException {
                    if (attrs.isDirectory()) {
                        return FileVisitResult.CONTINUE;
                    }

                    final long lastModTime = getLastModTime(file);
                    if (lastModTime < timestampThreshold) {
                        try {
                            Files.deleteIfExists(file);
                            containerState.decrementArchiveCount();
                            LOG.debug(
                                    "Deleted archived ContentClaim with ID {} from Container {} because it was older than the configured max archival duration",
                                    file.toFile().getName(), containerName);
                        } catch (final IOException ioe) {
                            LOG.warn(
                                    "Failed to remove archived ContentClaim with ID {} from Container {} due to {}",
                                    file.toFile().getName(), containerName, ioe.toString());
                            if (LOG.isDebugEnabled()) {
                                LOG.warn("", ioe);
                            }
                        }
                    } else if (usableSpace < minRequiredSpace) {
                        notYetExceedingThreshold
                                .add(new ArchiveInfo(container, file, attrs.size(), lastModTime));
                    }

                    return FileVisitResult.CONTINUE;
                }
            });
        } catch (final IOException ioe) {
            LOG.warn("Failed to cleanup archived files in {} due to {}", archive, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }
    }
    final long deleteExpiredMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);

    // Sort the list according to last modified time
    Collections.sort(notYetExceedingThreshold, new Comparator<ArchiveInfo>() {
        @Override
        public int compare(final ArchiveInfo o1, final ArchiveInfo o2) {
            return Long.compare(o1.getLastModTime(), o2.getLastModTime());
        }
    });

    final long sortRemainingMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteExpiredMillis;

    // Delete the oldest data
    archiveExpirationLog.debug("Deleting data based on timestamp");
    final Iterator<ArchiveInfo> itr = notYetExceedingThreshold.iterator();
    int counter = 0;
    while (itr.hasNext()) {
        final ArchiveInfo archiveInfo = itr.next();

        try {
            final Path path = archiveInfo.toPath();
            Files.deleteIfExists(path);
            containerState.decrementArchiveCount();
            LOG.debug(
                    "Deleted archived ContentClaim with ID {} from Container {} because the archival size was exceeding the max configured size",
                    archiveInfo.getName(), containerName);

            // Check if we've freed enough space every 25 files that we destroy
            if (++counter % 25 == 0) {
                if (getContainerUsableSpace(containerName) > minRequiredSpace) { // check if we can stop now
                    LOG.debug("Finished cleaning up archive for Container {}", containerName);
                    break;
                }
            }
        } catch (final IOException ioe) {
            LOG.warn("Failed to delete {} from archive due to {}", archiveInfo, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }

        itr.remove();
    }

    final long deleteOldestMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - sortRemainingMillis
            - deleteExpiredMillis;

    long oldestContainerArchive;
    if (notYetExceedingThreshold.isEmpty()) {
        oldestContainerArchive = System.currentTimeMillis();
    } else {
        oldestContainerArchive = notYetExceedingThreshold.get(0).getLastModTime();
    }

    if (oldestContainerArchive < oldestArchiveDateFound) {
        oldestArchiveDateFound = oldestContainerArchive;
    }

    // Queue up the files in the order that they should be destroyed so that we don't have to scan the directories for a while.
    for (final ArchiveInfo toEnqueue : notYetExceedingThreshold.subList(0,
            Math.min(100000, notYetExceedingThreshold.size()))) {
        fileQueue.offer(toEnqueue);
    }

    final long cleanupMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteOldestMillis
            - sortRemainingMillis - deleteExpiredMillis;
    LOG.debug(
            "Oldest Archive Date for Container {} is {}; delete expired = {} ms, sort remaining = {} ms, delete oldest = {} ms, cleanup = {} ms",
            containerName, new Date(oldestContainerArchive), deleteExpiredMillis, sortRemainingMillis,
            deleteOldestMillis, cleanupMillis);
    return oldestContainerArchive;
}

From source file:org.eclipse.hawkbit.mgmt.rest.resource.MgmtTargetResourceTest.java

@Test
@Description("Verifies that the API returns the status list with expected content.")
public void getMultipleActionStatus() throws Exception {
    final String knownTargetId = "targetId";
    final Action action = generateTargetWithTwoUpdatesWithOneOverride(knownTargetId).get(0);
    // retrieve the list in default descending order for action status entries
    final List<ActionStatus> actionStatus = deploymentManagement.findActionStatusByAction(PAGE, action.getId())
            .getContent().stream().sorted((e1, e2) -> Long.compare(e2.getId(), e1.getId()))
            .collect(Collectors.toList());

    // sort is default descending order, latest status first
    mvc.perform(get(MgmtRestConstants.TARGET_V1_REQUEST_MAPPING + "/" + knownTargetId + "/"
            + MgmtRestConstants.TARGET_V1_ACTIONS + "/" + action.getId() + "/"
            + MgmtRestConstants.TARGET_V1_ACTION_STATUS)).andDo(MockMvcResultPrinter.print())
            .andExpect(status().isOk())
            .andExpect(jsonPath("content.[0].id", equalTo(actionStatus.get(0).getId().intValue())))
            .andExpect(jsonPath("content.[0].type", equalTo("canceling")))
            .andExpect(jsonPath("content.[0].messages",
                    hasItem("Update Server: cancel obsolete action due to new update")))
            .andExpect(jsonPath("content.[0].reportedAt", equalTo(actionStatus.get(0).getCreatedAt())))
            .andExpect(jsonPath("content.[1].id", equalTo(actionStatus.get(1).getId().intValue())))
            .andExpect(jsonPath("content.[1].type", equalTo("running")))
            .andExpect(jsonPath("content.[1].reportedAt", equalTo(actionStatus.get(1).getCreatedAt())))
            .andExpect(jsonPath(JSON_PATH_PAGED_LIST_TOTAL, equalTo(2)))
            .andExpect(jsonPath(JSON_PATH_PAGED_LIST_SIZE, equalTo(2)))
            .andExpect(jsonPath(JSON_PATH_PAGED_LIST_CONTENT, hasSize(2)));
}