Example usage for java.util NavigableMap isEmpty

Introduction

On this page you can find usage examples for java.util.NavigableMap.isEmpty().

Prototype

boolean isEmpty();

Document

Returns true if this map contains no key-value mappings.
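
A minimal, self-contained sketch (not taken from any of the projects below) illustrating the typical pattern: check isEmpty() before calling navigation methods such as firstKey() or lastKey(), which throw NoSuchElementException on an empty map.

import java.util.NavigableMap;
import java.util.TreeMap;

public class NavigableMapIsEmptyExample {
    public static void main(String[] args) {
        NavigableMap<Long, String> map = new TreeMap<Long, String>();

        // A freshly created map contains no key-value mappings
        System.out.println(map.isEmpty()); // true

        map.put(7L, "first message");
        System.out.println(map.isEmpty()); // false

        // Guard navigation calls such as lastKey(), which throw
        // NoSuchElementException when the map is empty
        long nextUid = map.isEmpty() ? 1 : map.lastKey() + 1;
        System.out.println(nextUid); // 8
    }
}

Several of the examples below rely on the same guard, for instance AlfrescoImapFolder.getUidNext(), which returns search.isEmpty() ? 1 : search.lastKey() + 1.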

Usage

From source file:com.att.aro.core.packetanalysis.impl.VideoUsageAnalysisImpl.java

/**
 * <pre>
 * Final pass to fix up duration values if not already set.
 * Examine startTime for each segment compared with the next segment to determine duration when not set.
 *
 * Problems occur when there is a missing segment and on the last segment.
 *  Missing segments cause an approximation by dividing the duration by the number of missing segments + 1.
 *  The last segment simply repeats the previous duration; this should not skew results by much.
 * </pre>
 */
private void updateDuration() {
    log.info("updateDuration()");
    if (videoUsage != null) {
        for (AROManifest manifest : videoUsage.getManifests()) {
            if (manifest != null) {
                NavigableMap<String, VideoEvent> eventMap = manifest.getSegmentEventList();
                if (manifest instanceof ManifestDash && !eventMap.isEmpty()) {
                    int seg = 0;
                    Entry<String, VideoEvent> lastEntry = eventMap.lastEntry();
                    double lastSeg = lastEntry != null ? lastEntry.getValue().getSegment() : 0;
                    String key = manifest.generateVideoEventKey(0, 0, "z");
                    Entry<String, VideoEvent> val;
                    Entry<String, VideoEvent> valn;
                    double duration = 0;
                    VideoEvent event;
                    String segNextKey = null;
                    for (seg = 1; seg <= lastSeg; seg++) {
                        segNextKey = manifest.generateVideoEventKey(seg, 0, "z");
                        val = eventMap.higherEntry(key);
                        valn = eventMap.higherEntry(segNextKey);
                        if (val == null || valn == null) {
                            break;
                        }
                        event = val.getValue();
                        VideoEvent eventNext = valn.getValue();
                        duration = eventNext.getSegmentStartTime() - event.getSegmentStartTime();
                        double deltaSegment = eventNext.getSegment() - event.getSegment();
                        if (deltaSegment > 1) {
                            duration /= deltaSegment;
                        }
                        updateSegmentDuration(eventMap, key, segNextKey, duration);
                        key = segNextKey;
                    }
                    // handle any segments at the end
                    val = eventMap.higherEntry(key);
                    if (val != null && segNextKey != null) {
                        updateSegmentDuration(eventMap, key, segNextKey, duration);
                    }
                }
            }
        }
    }
}

From source file:com.mirth.connect.server.controllers.DonkeyMessageController.java

private List<MessageSearchResult> searchMessages(MessageFilter filter, String channelId, int offset,
        int limit) {
    long startTime = System.currentTimeMillis();

    FilterOptions filterOptions = new FilterOptions(filter, channelId);
    long maxMessageId = filterOptions.getMaxMessageId();
    long minMessageId = filterOptions.getMinMessageId();

    Long localChannelId = ChannelController.getInstance().getLocalChannelId(channelId);
    Map<String, Object> params = getBasicParameters(filter, localChannelId);

    try {
        NavigableMap<Long, MessageSearchResult> messages = new TreeMap<Long, MessageSearchResult>();
        SqlSession session = SqlConfig.getSqlSessionManager();

        int offsetRemaining = offset;
        /*
         * If the limit is greater than the default batch size, use the limit, but cap it at
         * 50000.
         */
        long batchSize = Math.min(Math.max(limit, 500), 50000);
        long totalSearched = 0;

        while (messages.size() < limit && maxMessageId >= minMessageId) {
            /*
             * Slowly increase the batch size in case all the necessary results are found early
             * on.
             */
            if (totalSearched >= 100000 && batchSize < 50000) {
                batchSize = 50000;
            } else if (totalSearched >= 10000 && batchSize < 10000) {
                batchSize = 10000;
            } else if (totalSearched >= 1000 && batchSize < 1000) {
                batchSize = 1000;
            }

            /*
             * Search in descending order so that messages are found from the greatest to the
             * lowest message id.
             */
            long currentMinMessageId = Math.max(maxMessageId - batchSize + 1, minMessageId);
            params.put("maxMessageId", maxMessageId);
            params.put("minMessageId", currentMinMessageId);
            maxMessageId -= batchSize;
            totalSearched += batchSize;

            Map<Long, MessageSearchResult> foundMessages = searchAll(session, params, filter, localChannelId,
                    false, filterOptions);

            if (!foundMessages.isEmpty()) {
                /*
                 * Skip results until there is no offset remaining. This is required when
                 * viewing results beyond the first page
                 */
                if (offsetRemaining >= foundMessages.size()) {
                    offsetRemaining -= foundMessages.size();
                } else if (offsetRemaining == 0) {
                    messages.putAll(foundMessages);
                } else {
                    NavigableMap<Long, MessageSearchResult> orderedMessages = new TreeMap<Long, MessageSearchResult>(
                            foundMessages);

                    while (offsetRemaining-- > 0) {
                        orderedMessages.pollLastEntry();
                    }

                    messages.putAll(orderedMessages);
                }
            }
        }

        // Remove results beyond the limit requested
        while (messages.size() > limit) {
            messages.pollFirstEntry();
        }

        List<MessageSearchResult> results = new ArrayList<MessageSearchResult>(messages.size());

        /*
         * Now that we have the message and metadata ids that should be returned as the result,
         * we need to retrieve the message data for those.
         */
        if (!messages.isEmpty()) {
            Iterator<Long> iterator = messages.descendingKeySet().iterator();

            while (iterator.hasNext()) {
                Map<String, Object> messageParams = new HashMap<String, Object>();
                messageParams.put("localChannelId", localChannelId);

                ListRangeIterator listRangeIterator = new ListRangeIterator(iterator,
                        ListRangeIterator.DEFAULT_LIST_LIMIT, false, null);

                while (listRangeIterator.hasNext()) {
                    ListRangeItem item = listRangeIterator.next();
                    List<Long> list = item.getList();
                    Long startRange = item.getStartRange();
                    Long endRange = item.getEndRange();

                    if (list != null || (startRange != null && endRange != null)) {
                        if (list != null) {
                            messageParams.remove("minMessageId");
                            messageParams.remove("maxMessageId");
                            messageParams.put("includeMessageList", StringUtils.join(list, ","));
                        } else {
                            messageParams.remove("includeMessageList");
                            messageParams.put("minMessageId", endRange);
                            messageParams.put("maxMessageId", startRange);
                        }

                        // Get the current batch of results
                        List<MessageSearchResult> currentResults = session
                                .selectList("Message.selectMessagesById", messageParams);

                        // Add the metadata ids to each result
                        for (MessageSearchResult currentResult : currentResults) {
                            currentResult.setMetaDataIdSet(
                                    messages.get(currentResult.getMessageId()).getMetaDataIdSet());
                        }

                        // Add the current batch to the final list of results
                        results.addAll(currentResults);
                    }
                }
            }
        }

        return results;
    } finally {
        long endTime = System.currentTimeMillis();
        logger.debug("Search executed in " + (endTime - startTime) + "ms");
    }
}

From source file:org.alfresco.repo.imap.AlfrescoImapFolder.java

/**
 * Returns the UIDNEXT value of the folder.
 * 
 * @return UIDNEXT value.
 */
@Override
public long getUidNext() {
    NavigableMap<Long, FileInfo> search = getFolderStatus().search;
    return search.isEmpty() ? 1 : search.lastKey() + 1;
}

From source file:org.alfresco.repo.imap.ImapServiceImpl.java

/**
 * Search for emails in the specified folder depending on view mode.
 *
 * Shallow list of files.
 * 
 * @param contextNodeRef context folder for search
 * @param viewMode context folder view mode
 * @return list of emails that context folder contains.
 */
public FolderStatus getFolderStatus(final String userName, final NodeRef contextNodeRef,
        ImapViewMode viewMode) {
    if (logger.isDebugEnabled()) {
        logger.debug("getFolderStatus contextNodeRef=" + contextNodeRef + ", viewMode=" + viewMode);
    }

    // No need to ACL check the change token read
    String changeToken = AuthenticationUtil.runAs(new RunAsWork<String>() {
        @Override
        public String doWork() throws Exception {
            return (String) nodeService.getProperty(contextNodeRef, ImapModel.PROP_CHANGE_TOKEN);
        }
    }, AuthenticationUtil.getSystemUserName());

    Pair<String, String> cacheKey = null;
    if (changeToken != null) {
        cacheKey = new Pair<String, String>(userName, changeToken);
        this.folderCacheLock.readLock().lock();
        try {
            FolderStatus result = this.folderCache.get(cacheKey);
            if (result != null) {
                return result;
            }
        } finally {
            this.folderCacheLock.readLock().unlock();
        }
    }

    List<FileInfo> fileInfos = null;
    FileFilterMode.setClient(Client.imap);
    try {
        fileInfos = fileFolderService.listFiles(contextNodeRef);
    } finally {
        FileFilterMode.clearClient();
    }

    final NavigableMap<Long, FileInfo> currentSearch = new TreeMap<Long, FileInfo>();

    switch (viewMode) {
    case MIXED:
        for (FileInfo fileInfo : fileInfos) {
            currentSearch.put((Long) fileInfo.getProperties().get(ContentModel.PROP_NODE_DBID), fileInfo);
        }
        break;
    case ARCHIVE:
        for (FileInfo fileInfo : fileInfos) {
            if (nodeService.hasAspect(fileInfo.getNodeRef(), ImapModel.ASPECT_IMAP_CONTENT)) {
                currentSearch.put((Long) fileInfo.getProperties().get(ContentModel.PROP_NODE_DBID), fileInfo);
            }
        }
        break;
    case VIRTUAL:
        for (FileInfo fileInfo : fileInfos) {
            if (!nodeService.hasAspect(fileInfo.getNodeRef(), ImapModel.ASPECT_IMAP_CONTENT)) {
                currentSearch.put((Long) fileInfo.getProperties().get(ContentModel.PROP_NODE_DBID), fileInfo);
            }
        }
        break;
    }

    int messageCount = currentSearch.size(), recentCount = 0, unseenCount = 0, firstUnseen = 0;
    int i = 1;
    for (FileInfo fileInfo : currentSearch.values()) {
        Flags flags = getFlags(fileInfo);
        if (flags.contains(Flags.Flag.RECENT)) {
            recentCount++;
        }
        if (!flags.contains(Flags.Flag.SEEN)) {
            if (firstUnseen == 0) {
                firstUnseen = i;
            }
            unseenCount++;
        }
        i++;
    }
    // Add the IMAP folder aspect with appropriate initial values if it is not already there
    if (changeToken == null) {
        changeToken = GUID.generate();
        cacheKey = new Pair<String, String>(userName, changeToken);
        final String finalToken = changeToken;
        doAsSystem(new RunAsWork<Void>() {
            @Override
            public Void doWork() throws Exception {
                nodeService.setProperty(contextNodeRef, ImapModel.PROP_CHANGE_TOKEN, finalToken);
                nodeService.setProperty(contextNodeRef, ImapModel.PROP_MAXUID,
                        currentSearch.isEmpty() ? 0 : currentSearch.lastKey());
                return null;
            }
        });
    }
    Long uidValidity = (Long) nodeService.getProperty(contextNodeRef, ImapModel.PROP_UIDVALIDITY);
    FolderStatus result = new FolderStatus(messageCount, recentCount, firstUnseen, unseenCount,
            uidValidity == null ? 0 : uidValidity, changeToken, currentSearch);
    this.folderCacheLock.writeLock().lock();
    try {
        FolderStatus oldResult = this.folderCache.get(cacheKey);
        if (oldResult != null) {
            if (logger.isDebugEnabled()) {
                logger.debug("At end of getFolderStatus. Found info in cache, changeToken:" + changeToken);
            }

            return oldResult;
        }
        this.folderCache.put(cacheKey, result);

        if (logger.isDebugEnabled()) {
            logger.debug("At end of getFolderStatus. Found files:" + currentSearch.size() + ", changeToken:"
                    + changeToken);
        }
        return result;
    } finally {
        this.folderCacheLock.writeLock().unlock();
    }
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

/**
 * Get the entry position that comes before the specified position in the message stream, using information from
 * the ledger list and each ledger's entry count.
 *
 * @param position
 *            the current position
 * @return the previous position
 */
PositionImpl getPreviousPosition(PositionImpl position) {
    if (position.getEntryId() > 0) {
        return PositionImpl.get(position.getLedgerId(), position.getEntryId() - 1);
    }

    // The previous position will be the last position of an earlier ledger
    NavigableMap<Long, LedgerInfo> headMap = ledgers.headMap(position.getLedgerId(), false);

    if (headMap.isEmpty()) {
        // There is no previous ledger, return an invalid position in the current ledger
        return PositionImpl.get(position.getLedgerId(), -1);
    }

    // We need to find the most recent non-empty ledger
    for (long ledgerId : headMap.descendingKeySet()) {
        LedgerInfo li = headMap.get(ledgerId);
        if (li.getEntries() > 0) {
            return PositionImpl.get(li.getLedgerId(), li.getEntries() - 1);
        }
    }

    // in case there are only empty ledgers, we return a position in the first one
    return PositionImpl.get(headMap.firstEntry().getKey(), -1);
}

From source file:org.apache.hadoop.hbase.mapreduce.CrossSiteTableInputFormat.java

@Override
public List<InputSplit> getSplits(JobContext context) throws IOException {
    if (table == null) {
        throw new IOException("No table was provided.");
    }
    NavigableMap<HRegionInfo, ServerName> locations = table.getRegionLocations();
    if (!locations.isEmpty()) {
        Scan scan = getScan();
        List<InputSplit> splits = new ArrayList<InputSplit>();
        int i = 0;
        for (Entry<HRegionInfo, ServerName> location : locations.entrySet()) {
            String regionLocation = location.getValue().getHostname();
            byte[] startRow = scan.getStartRow();
            byte[] stopRow = scan.getStopRow();
            if ((startRow.length == 0 || location.getKey().getEndKey().length == 0
                    || Bytes.compareTo(startRow, location.getKey().getEndKey()) < 0)
                    && (stopRow.length == 0 || Bytes.compareTo(stopRow, location.getKey().getStartKey()) > 0)) {
                byte[] splitStart = startRow.length == 0
                        || Bytes.compareTo(location.getKey().getStartKey(), startRow) >= 0
                                ? location.getKey().getStartKey()
                                : startRow;
                byte[] splitStop = (stopRow.length == 0
                        || Bytes.compareTo(location.getKey().getEndKey(), stopRow) <= 0)
                        && location.getKey().getEndKey().length > 0 ? location.getKey().getEndKey() : stopRow;
                TableSplit split = new TableSplit(location.getKey().getTableName(), splitStart, splitStop,
                        regionLocation);
                splits.add(split);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("getSplits: split -> " + i++ + " -> " + split);
                }
            }
        }
        return splits;
    } else {
        return Collections.emptyList();
    }
}

From source file:org.apache.hadoop.hbase.master.RegionManager.java

static MetaRegion getMetaRegionPointingTo(NavigableMap<byte[], MetaRegion> metaRegions, HRegionInfo newRegion) {
    if (metaRegions.isEmpty()) {
        return null;
    } else if (metaRegions.size() == 1) {
        return metaRegions.get(metaRegions.firstKey());
    } else {
        if (metaRegions.containsKey(newRegion.getRegionName())) {
            return metaRegions.get(newRegion.getRegionName());
        }
        return metaRegions.get(metaRegions.headMap(newRegion.getRegionName()).lastKey());
    }
}

From source file:org.apache.hadoop.hbase.replication.regionserver.Replication.java

/**
 * Utility method used to set the correct scopes on each log key. Doesn't set a scope on keys
 * from compaction WAL edits or when the scope is local.
 * @param htd Descriptor used to find the scope to use
 * @param logKey Key that may get scoped according to its edits
 * @param logEdit Edits used to lookup the scopes
 */
public static void scopeWALEdits(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    byte[] family;
    for (KeyValue kv : logEdit.getKeyValues()) {
        family = kv.getFamily();
        // This is expected and the KV should not be replicated
        if (CellUtil.matchingFamily(kv, WALEdit.METAFAMILY))
            continue;
        // Unexpected, has a tendency to happen in unit tests
        assert htd.getFamily(family) != null;

        int scope = htd.getFamily(family).getScope();
        if (scope != REPLICATION_SCOPE_LOCAL && !scopes.containsKey(family)) {
            scopes.put(family, scope);
        }
    }
    if (!scopes.isEmpty()) {
        logKey.setScopes(scopes);
    }
}

From source file:org.apache.hadoop.hbase.replication.ScopeWALEntryFilter.java

@Override
public Entry filter(Entry entry) {
    NavigableMap<byte[], Integer> scopes = entry.getKey().getReplicationScopes();
    if (scopes == null || scopes.isEmpty()) {
        return null;
    }
    ArrayList<Cell> cells = entry.getEdit().getCells();
    int size = cells.size();
    byte[] fam;
    for (int i = size - 1; i >= 0; i--) {
        Cell cell = cells.get(i);
        // If a bulk load entry has a scope then that means the user has enabled replication for
        // bulk load hfiles.
        // TODO There is a similar logic in TableCfWALEntryFilter but data structures are different so
        // cannot refactor into one now, can revisit and see if any way to unify them.
        if (CellUtil.matchingColumn(cell, WALEdit.METAFAMILY, WALEdit.BULK_LOAD)) {
            Cell filteredBulkLoadEntryCell = filterBulkLoadEntries(scopes, cell);
            if (filteredBulkLoadEntryCell != null) {
                cells.set(i, filteredBulkLoadEntryCell);
            } else {
                cells.remove(i);
            }
        } else {
            // The scope will be null or empty if
            // there's nothing to replicate in that WALEdit
            fam = CellUtil.cloneFamily(cell);
            if (!scopes.containsKey(fam) || scopes.get(fam) == HConstants.REPLICATION_SCOPE_LOCAL) {
                cells.remove(i);
            }
        }
    }
    if (cells.size() < size / 2) {
        cells.trimToSize();
    }
    return entry;
}

From source file:org.apache.kylin.rest.service.AclTableMigrationTool.java

private Map<String, AceInfo> getAllAceInfo(Result result) throws IOException {
    Map<String, AceInfo> allAceInfoMap = new HashMap<>();
    NavigableMap<byte[], byte[]> familyMap = result.getFamilyMap(Bytes.toBytes(AclConstant.ACL_ACES_FAMILY));
    if (familyMap != null && !familyMap.isEmpty()) {
        for (Map.Entry<byte[], byte[]> entry : familyMap.entrySet()) {
            String sid = new String(entry.getKey());
            AceInfo aceInfo = aceSerializer.deserialize(entry.getValue());
            if (null != aceInfo) {
                allAceInfoMap.put(sid, aceInfo);
            }
        }
    }
    return allAceInfoMap;
}