Example usage for java.util.TreeMap.values()

Introduction

This page collects example usages of java.util.TreeMap.values() from open source projects.

Prototype

public Collection<V> values() 

Document

Returns a Collection view of the values contained in this map.
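
A minimal, self-contained sketch (the class name and sample data are illustrative, not taken from any project below) of the two properties the following examples rely on: values() iterates in ascending key order, and the returned collection is a live view backed by the map.

import java.util.Collection;
import java.util.TreeMap;

public class TreeMapValuesDemo {
    public static void main(String[] args) {
        TreeMap<Integer, String> map = new TreeMap<>();
        map.put(3, "c");
        map.put(1, "a");
        map.put(2, "b");

        // Iteration follows ascending key order, not insertion order.
        Collection<String> values = map.values();
        System.out.println(values); // prints [a, b, c]

        // The collection is a view: removing an element removes the mapping.
        values.remove("b");
        System.out.println(map.containsKey(2)); // prints false
    }
}

Because the collection is a view, copying it (for example, new ArrayList<>(map.values())) is the usual way to take a stable, key-ordered snapshot, as several of the examples below do.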

Usage

From source file:com.celements.navigation.service.TreeNodeService.java

private List<TreeNode> fetchNodesForParentKey_internal(String parentKey, long starttotal, long start) {
    List<TreeNode> notMappedmenuItems = treeNodeCache.getNotMappedMenuItemsForParentCmd()
            .getTreeNodesForParentKey(parentKey, getContext());
    long end = System.currentTimeMillis();
    LOGGER.debug("fetchNodesForParentKey_internal: time for" + " getNotMappedMenuItemsFromDatabase: "
            + (end - start));
    start = System.currentTimeMillis();
    List<TreeNode> mappedTreeNodes = treeNodeCache.getMappedMenuItemsForParentCmd()
            .getTreeNodesForParentKey(parentKey, getContext());
    end = System.currentTimeMillis();
    LOGGER.debug(
            "fetchNodesForParentKey_internal: time for" + " getMappedMenuItemsForParentCmd: " + (end - start));
    start = System.currentTimeMillis();
    TreeMap<Integer, TreeNode> menuItemsMergedMap = null;
    if ((notMappedmenuItems == null) || (notMappedmenuItems.size() == 0)) {
        end = System.currentTimeMillis();
        LOGGER.info("fetchNodesForParentKey_internal: [" + parentKey + "] totaltime for list of ["
                + mappedTreeNodes.size() + "]: " + (end - starttotal));
        return mappedTreeNodes;
    } else if (mappedTreeNodes.size() == 0) {
        end = System.currentTimeMillis();
        LOGGER.info("fetchNodesForParentKey_internal: [" + parentKey + "] totaltime for list of ["
                + notMappedmenuItems.size() + "]: " + (end - starttotal));
        return notMappedmenuItems;
    } else {
        menuItemsMergedMap = new TreeMap<Integer, TreeNode>();
        for (TreeNode node : notMappedmenuItems) {
            menuItemsMergedMap.put(Integer.valueOf(node.getPosition()), node);
        }
        for (TreeNode node : mappedTreeNodes) {
            menuItemsMergedMap.put(Integer.valueOf(node.getPosition()), node);
        }
        end = System.currentTimeMillis();
        LOGGER.debug("fetchNodesForParentKey_internal: time for merging menu items: " + (end - start));
        ArrayList<TreeNode> menuItems = new ArrayList<TreeNode>(menuItemsMergedMap.values());
        LOGGER.info("fetchNodesForParentKey_internal: [" + parentKey + "] totaltime for list of ["
                + menuItems.size() + "]: " + (end - starttotal));
        return menuItems;
    }
}

From source file:com.yahoo.elide.core.PersistentResource.java

/**
 * Get relationship mappings.
 *
 * @return Relationship mapping
 */
protected Map<String, Relationship> getRelationships() {
    final Map<String, Relationship> relationshipMap = new LinkedHashMap<>();
    final Set<String> relationshipFields = filterFields(ReadPermission.class, this,
            dictionary.getRelationships(obj));

    for (String field : relationshipFields) {
        Set<PersistentResource> relationships = getRelation(field);
        TreeMap<String, Resource> orderedById = new TreeMap<>(comparator);
        for (PersistentResource relationship : relationships) {
            orderedById.put(relationship.getId(),
                    new ResourceIdentifier(relationship.getType(), relationship.getId()).castToResource());
        }
        Collection<Resource> resources = orderedById.values();

        Data<Resource> data;
        RelationshipType relationshipType = getRelationshipType(field);
        if (relationshipType.isToOne()) {
            data = resources.isEmpty() ? new Data<>((Resource) null) : new Data<>(resources.iterator().next());
        } else {
            data = new Data<>(resources);
        }
        // TODO - links
        relationshipMap.put(field, new Relationship(null, data));
    }

    return relationshipMap;
}

From source file:com.edgenius.wiki.gwt.server.SecurityControllerImpl.java

public UserProfileModel getUserContributed(String username, int type) {
    User viewer = WikiUtil.getUser();

    UserProfileModel profile = new UserProfileModel();

    if (type == 0 || (type & SharedConstants.SPACE) > 0) {
        //this indicates to the client side that spaceList has been refreshed.
        profile.spaces = new ArrayList<SpaceModel>();

        List<Space> spaces = spaceService.getUserAllCreatedSpaces(username, -1, viewer);
        for (Space space : spaces) {
            SpaceModel model = new SpaceModel();
            SpaceUtil.copySpaceToModel(space, model, viewer, themeService);
            profile.spaces.add(model);
        }
    }

    //Currently, history and page are treated the same.
    if (type == 0 || (type & SharedConstants.PAGE) > 0 || (type & SharedConstants.HISTORY) > 0) {
        //this indicates to the client side that pageList has been refreshed.
        profile.pages = new ArrayList<PageItemModel>();

        List<Page> pages = pageService.getUserAllContributedPages(username, -1, viewer);
        List<History> histories = pageService.getUserAllContributedHistories(username, viewer);

        Map<String, PageItemModel> map = new HashMap<String, PageItemModel>();
        //merge them by pageUUID
        for (Page page : pages) {
            PageItemModel item = map.get(page.getPageUuid());
            if (item == null) {
                item = PageUtil.copyToPageItem(page);
                //sort versionHistory creator->2,...5->current etc.
                item.versionHistory = new HashMap<Integer, PageItemModel>();
                map.put(page.getPageUuid(), item);
            }
            if ((page.getCreator() != null && StringUtils.equals(page.getCreator().getUsername(), username))
                    || (page.getCreator() == null
                            && (username == null || User.ANONYMOUS_USERNAME.equalsIgnoreCase(username)))) {
                PageItemModel ver = new PageItemModel();
                ver.uid = page.getUid();
                ver.modifiedDate = DateUtil.getLocalDate(viewer, page.getCreatedDate());
                item.versionHistory.put(0, ver);
            }
            if ((page.getModifier() != null && StringUtils.equals(page.getModifier().getUsername(), username))
                    || (page.getModifier() == null
                            && (username == null || User.ANONYMOUS_USERNAME.equalsIgnoreCase(username)))) {
                PageItemModel ver = new PageItemModel();
                ver.uid = page.getUid();
                ver.modifiedDate = DateUtil.getLocalDate(viewer, page.getModifiedDate());
                item.versionHistory.put(Integer.MAX_VALUE, ver);
            }
        }
        for (History history : histories) {
            PageItemModel item = map.get(history.getPageUuid());
            if (item == null) {
                item = PageUtil.copyToPageItem(history);
                item.versionHistory = new HashMap<Integer, PageItemModel>();
                map.put(history.getPageUuid(), item);
            }
            PageItemModel ver = new PageItemModel();
            ver.uid = history.getUid();
            ver.modifiedDate = DateUtil.getLocalDate(viewer, history.getModifiedDate());
            item.versionHistory.put(history.getVersion(), ver);
        }

        //default sort pages by space name
        TreeMap<String, PageItemModel> sortedPages = new TreeMap<String, PageItemModel>(
                new Comparator<String>() {
                    public int compare(String o1, String o2) {
                        int ret = o1.compareTo(o2);
                        //don't overwrite same spaceUname pages
                        return ret == 0 ? 1 : ret;
                    }

                });
        for (PageItemModel item : map.values()) {
            sortedPages.put(item.spaceUname, item);
        }

        profile.pages = new ArrayList<PageItemModel>(sortedPages.values());
    }
    if (type == 0 || (type & SharedConstants.ACTIVITY) > 0) {
        //this indicates to the client side that the activity list has been refreshed.
        profile.activities = new ArrayList<ActivityModel>();
        User user = userReadingService.getUserByName(username);
        //TODO: now only get first 15 activities;
        List<ActivityLog> msgs = activityLog.getUserActivities(0, 15, user, WikiUtil.getUser());
        for (ActivityLog msg : msgs) {
            ActivityModel act = new ActivityModel();
            act.activity = msg.getMessage();
            profile.activities.add(act);
        }

    }
    return profile;
}

From source file:org.catrobat.jira.adminhelper.rest.UserRest.java

private Response searchUser(String query, HttpServletRequest request) {
    Response unauthorized = checkPermission(request);
    if (unauthorized != null) {
        return unauthorized;
    }
    if (lendingService == null) {
        return Response.serverError().entity("Lending Service must not be null").build();
    }

    if (query == null || query.length() < 1) {
        return Response.ok(new ArrayList<JsonUser>()).build();
    }

    query = StringEscapeUtils.escapeHtml4(query);

    com.atlassian.jira.user.util.UserManager jiraUserManager = ComponentAccessor.getUserManager();
    TreeMap<String, JsonUser> jsonUsers = new TreeMap<String, JsonUser>();
    for (ApplicationUser user : jiraUserManager.getAllUsers()) {
        if (user.getName().toLowerCase().contains(query.toLowerCase())
                || user.getDisplayName().toLowerCase().contains(query.toLowerCase())) {
            JsonUser jsonUser = new JsonUser();
            jsonUser.setUserName(user.getKey());
            jsonUser.setDisplayName(user.getDisplayName());
            jsonUsers.put(user.getName().toLowerCase(), jsonUser);
        }
    }

    for (Lending lending : lendingService.all()) {
        if (lending.getLendingByUserKey().toLowerCase().contains(query.toLowerCase())
                && !jsonUsers.containsKey(lending.getLendingByUserKey().toLowerCase())) {
            JsonUser jsonUser = new JsonUser();
            jsonUser.setUserName(lending.getLendingByUserKey());
            jsonUser.setDisplayName(lending.getLendingByUserKey());
            String userKey = jsonUser.getUserName().toLowerCase().replaceAll(" ", "").replaceAll("&auml;", "ae")
                    .replaceAll("&ouml;", "oe").replaceAll("&uuml;", "ue").replaceAll("&szlig;", "ss");
            jsonUsers.put(userKey, jsonUser);
        }
    }

    return Response.ok(jsonUsers.values()).build();
}

From source file:org.apache.oozie.service.ZKXLogStreamingService.java

/**
 * Contacts each of the other Oozie servers, gets their logs for the job, collates them, and sends them to the user via the
 * Writer. It takes care not to read all of the log messages into memory at once, so it does not exhaust the heap. If there
 * is a problem talking to one of the other servers, it ignores that server and prepends a message to the Writer about it.
 * For the logs from this server, it reads them directly rather than going through the REST API, which is more efficient.
 *
 * @param logStreamer the XLogStreamer
 * @param startTime the job start time
 * @param endTime the job end time
 * @param writer the writer
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void collateLogs(XLogStreamer logStreamer, Date startTime, Date endTime, Writer writer)
        throws IOException {
    List<String> badOozies = new ArrayList<String>();
    List<ServiceInstance<Map>> oozies = null;
    try {
        oozies = zk.getAllMetaData();
    } catch (Exception ex) {
        throw new IOException("Issue communicating with ZooKeeper: " + ex.getMessage(), ex);
    }
    List<TimestampedMessageParser> parsers = new ArrayList<TimestampedMessageParser>(oozies.size());
    try {
        // Create a BufferedReader for getting the logs of each server and put them in a TimestampedMessageParser
        for (ServiceInstance<Map> oozie : oozies) {
            Map<String, String> oozieMeta = oozie.getPayload();
            String otherId = oozieMeta.get(ZKUtils.ZKMetadataKeys.OOZIE_ID);
            // If it's this server, we can just get them directly
            if (otherId.equals(zk.getZKId())) {
                BufferedReader reader = logStreamer.makeReader(startTime, endTime);
                parsers.add(new TimestampedMessageParser(reader, logStreamer.getXLogFilter()));
            }
            // If it's another server, we'll have to use the REST API
            else {
                String otherUrl = oozieMeta.get(ZKUtils.ZKMetadataKeys.OOZIE_URL);
                String jobId = logStreamer.getXLogFilter().getFilterParams().get(DagXLogInfoService.JOB);
                try {
                    // It's important that we specify ALL_SERVERS_PARAM=false in the GET request to prevent the other Oozie
                    // Server from trying aggregate logs from the other Oozie servers (and creating an infinite recursion)
                    final String url = otherUrl + "/v" + OozieClient.WS_PROTOCOL_VERSION + "/"
                            + RestConstants.JOB + "/" + jobId + "?" + RestConstants.JOB_SHOW_PARAM + "="
                            + logStreamer.getLogType() + "&" + RestConstants.ALL_SERVER_REQUEST + "=false"
                            + AuthUrlClient.getQueryParamString(logStreamer.getRequestParam());
                    // remove doAs from url to avoid failure while fetching
                    // logs in case of HA mode
                    String key = "doAs";
                    String[] value = null;
                    if (logStreamer.getRequestParam() != null) {
                        value = logStreamer.getRequestParam().get(key);
                    }
                    String urlWithoutdoAs = null;
                    if (value != null && value.length > 0 && value[0] != null && value[0].length() > 0) {
                        urlWithoutdoAs = url.replace("&" + key + "=" + URLEncoder.encode(value[0], "UTF-8"),
                                "");
                    } else {
                        urlWithoutdoAs = url;
                    }
                    BufferedReader reader = AuthUrlClient.callServer(urlWithoutdoAs);
                    parsers.add(new SimpleTimestampedMessageParser(reader, logStreamer.getXLogFilter()));
                } catch (IOException ioe) {
                    log.warn(
                            "Failed to retrieve logs for job [" + jobId + "] from Oozie server with ID ["
                                    + otherId + "] at [" + otherUrl + "]; log information may be incomplete",
                            ioe);
                    badOozies.add(otherId);
                }
            }
        }

        if (!StringUtils.isEmpty(logStreamer.getXLogFilter().getTruncatedMessage())) {
            writer.write(logStreamer.getXLogFilter().getTruncatedMessage());
        }

        // If the log param debug is set, write the start and end dates to the output stream.
        if (logStreamer.getXLogFilter().isDebugMode()) {
            writer.write(logStreamer.getXLogFilter().getDebugMessage());
        }
        // Add a message about any servers we couldn't contact
        if (!badOozies.isEmpty()) {
            writer.write(
                    "Unable to contact the following Oozie Servers for logs (log information may be incomplete):\n");
            for (String badOozie : badOozies) {
                writer.write("     ");
                writer.write(badOozie);
                writer.write("\n");
            }
            writer.write("\n");
            writer.flush();
        }

        // If it's just the one server (this server), then we don't need to do any more processing and can just copy it directly
        if (parsers.size() == 1) {
            TimestampedMessageParser parser = parsers.get(0);
            parser.processRemaining(writer, logStreamer);
        } else {
            // Now that we have a Reader for each server to get the logs from that server, we have to collate them.  Within each
            // server, the logs should already be in the correct order, so we can take advantage of that.  We'll use the
            // BufferedReaders to read the messages from the logs of each server and put them in order without having to bring
            // every message into memory at the same time.
            TreeMap<String, TimestampedMessageParser> timestampMap = new TreeMap<String, TimestampedMessageParser>();
            // populate timestampMap with initial values
            for (TimestampedMessageParser parser : parsers) {
                if (parser.increment()) {
                    timestampMap.put(parser.getLastTimestamp(), parser);
                }
            }
            while (timestampMap.size() > 1) {
                // pollFirstEntry() returns the earliest entry by timestamp and also removes it from the map
                TimestampedMessageParser earliestParser = timestampMap.pollFirstEntry().getValue();
                // Write the message from that parser at that timestamp
                writer.write(earliestParser.getLastMessage());
                if (logStreamer.shouldFlushOutput(earliestParser.getLastMessage().length())) {
                    writer.flush();
                }
                // Increment that parser to read the next message
                if (earliestParser.increment()) {
                    // If it still has messages left, put it back in the map with the new last timestamp for it
                    timestampMap.put(earliestParser.getLastTimestamp(), earliestParser);
                }
            }
            // If there's only one parser left in the map, then we can simply copy the rest of its lines directly to be faster
            if (timestampMap.size() == 1) {
                TimestampedMessageParser parser = timestampMap.values().iterator().next();
                writer.write(parser.getLastMessage()); // don't forget the last message read by the parser
                parser.processRemaining(writer, logStreamer);
            }
        }
    } finally {
        for (TimestampedMessageParser parser : parsers) {
            parser.closeReader();
        }
    }
}

From source file:org.hyperic.hq.authz.server.session.RoleManagerImpl.java

/**
 * Return the roles of a group.
 *
 * @throws PermissionException
 */
@Transactional(readOnly = true)
public PageList<RoleValue> getResourceGroupRoles(AuthzSubject whoami, Integer groupId, PageControl pc)
        throws PermissionException {
    ResourceGroup resGrp = resourceGroupDAO.findById(groupId);

    permissionManager.check(whoami.getId(), AuthzConstants.authzGroup, resGrp.getId(),
            AuthzConstants.perm_viewResourceGroup);

    Collection<Role> roles = resGrp.getRoles();

    TreeMap<String, Role> map = new TreeMap<String, Role>();
    for (Role role : roles) {
        int attr = pc.getSortattribute();
        switch (attr) {
        case SortAttribute.ROLE_NAME:
        default:
            map.put(role.getName(), role);
        }
    }

    ArrayList<Role> list = new ArrayList<Role>(map.values());

    if (pc.isDescending()) {
        Collections.reverse(list);
    }

    PageList<RoleValue> plist = rolePager.seek(list, pc.getPagenum(), pc.getPagesize());
    plist.setTotalSize(roles.size());

    return plist;
}

From source file:org.lockss.config.Tdb.java

/** Print a full description of all elements in the Tdb */
public void prettyPrint(PrintStream ps) {
    ps.println("Tdb");
    TreeMap<String, TdbPublisher> sorted = new TreeMap<String, TdbPublisher>(
            CatalogueOrderComparator.SINGLETON);
    sorted.putAll(getAllTdbPublishers());
    for (TdbPublisher tdbPublisher : sorted.values()) {
        tdbPublisher.prettyPrint(ps, 2);
    }
}

From source file:xc.mst.harvester.HarvestManager.java

private File[] reallySortFiles(File[] files) {
    TreeMap<Long, File> map = new TreeMap<Long, File>();
    for (File file : files) {
        if (file.getName().startsWith("initial")) { // want this one to be 1st.
            map.put(0L, file);
        } else {
            StringTokenizer st = new StringTokenizer(file.getName(), "_");
            try {
                st.nextToken();
                st.nextToken();
            } catch (NoSuchElementException e) {
                LOG.error("HarvestManager, trying to harvest from file, unexpected exception handling file "
                        + file.toString(), e);
                return files;
            }
            try {
                long tokL = Long.parseLong(st.nextToken());
                map.put(tokL, file);
            } catch (NumberFormatException nfe) {
                LOG.error("HarvestManager, trying to harvest from file, unexpected exception handling file "
                        + file.toString(), nfe);
                return files;
            }
        }
    }
    Collection<File> collection = map.values();
    return collection.toArray(new File[0]);
}

From source file:com.upplication.s3fs.util.AmazonS3ClientMock.java

/**
 * Lists all objects and returns an ObjectListing containing all elements,
 * with truncated set to false.
 */
@Override
public ObjectListing listObjects(ListObjectsRequest listObjectsRequest) throws AmazonClientException {
    String bucketName = listObjectsRequest.getBucketName();
    String prefix = listObjectsRequest.getPrefix();
    String marker = listObjectsRequest.getMarker();
    String delimiter = listObjectsRequest.getDelimiter();

    ObjectListing objectListing = new ObjectListing();
    objectListing.setBucketName(bucketName);
    objectListing.setPrefix(prefix);
    objectListing.setMarker(marker);
    objectListing.setDelimiter(delimiter);

    final Path bucket = find(bucketName);
    final TreeMap<String, S3Element> elems = new TreeMap<>();
    try {
        for (Path elem : Files.newDirectoryStream(bucket)) {
            S3Element element = parse(elem, bucket);
            if (!elems.containsKey(element.getS3Object().getKey()))
                elems.put(element.getS3Object().getKey(), element);
        }
    } catch (IOException e) {
        throw new AmazonClientException(e);
    }
    Iterator<S3Element> iterator = elems.values().iterator();
    int i = 0;
    boolean waitForMarker = !StringUtils.isNullOrEmpty(marker);
    while (iterator.hasNext()) {
        S3Element elem = iterator.next();
        if (elem.getS3Object().getKey().equals("/"))
            continue;
        String key = elem.getS3Object().getKey();
        if (waitForMarker) {
            waitForMarker = !key.startsWith(marker);
            if (waitForMarker)
                continue;
        }

        if (prefix != null && key.startsWith(prefix)) {
            int beginIndex = key.indexOf(prefix) + prefix.length();
            String rest = key.substring(beginIndex);
            if (delimiter != null && delimiter.length() > 0 && rest.contains(delimiter)) {
                String substring = key.substring(0, beginIndex + rest.indexOf(delimiter));
                if (!objectListing.getCommonPrefixes().contains(substring))
                    objectListing.getCommonPrefixes().add(substring);
                continue;
            }
            S3ObjectSummary s3ObjectSummary = parseToS3ObjectSummary(elem);
            objectListing.getObjectSummaries().add(s3ObjectSummary);

            if (i + 1 == LIMIT_AWS_MAX_ELEMENTS && iterator.hasNext()) {
                objectListing.setTruncated(true);
                objectListing.setNextMarker(iterator.next().getS3Object().getKey());
                return objectListing;
            }
            objectListing.setTruncated(false);

            i++;
        }

    }
    Collections.sort(objectListing.getObjectSummaries(), new Comparator<S3ObjectSummary>() {
        @Override
        public int compare(S3ObjectSummary o1, S3ObjectSummary o2) {
            return o1.getKey().compareTo(o2.getKey());
        }
    });
    return objectListing;
}

From source file:delfos.group.results.groupevaluationmeasures.MAE_byGroupStdDev.java

@Override
public GroupEvaluationMeasureResult getMeasureResult(GroupRecommenderSystemResult groupRecommenderSystemResult,
        DatasetLoader<? extends Rating> originalDatasetLoader, RelevanceCriteria relevanceCriteria,
        DatasetLoader<? extends Rating> trainingDatasetLoader,
        DatasetLoader<? extends Rating> testDatasetLoader) {

    TreeMap<GroupOfUsers, MeanIterative> maeGroups = new TreeMap<>();

    for (GroupOfUsers groupOfUsers : groupRecommenderSystemResult.getGroupsOfUsers()) {
        Collection<Recommendation> groupRecommendations = groupRecommenderSystemResult
                .getGroupOutput(groupOfUsers).getRecommendations().getRecommendations();

        if (groupRecommendations.isEmpty()) {
            continue;
        }
        MeanIterative maeGroup = new MeanIterative();

        Map<Integer, Map<Integer, ? extends Rating>> groupTrueRatings = new TreeMap<>();

        groupOfUsers.getIdMembers().stream().forEach((idUser) -> {
            try {
                groupTrueRatings.put(idUser, testDatasetLoader.getRatingsDataset().getUserRatingsRated(idUser));
            } catch (UserNotFound ex) {
                ERROR_CODES.USER_NOT_FOUND.exit(ex);
            }
        });

        for (Recommendation recommendation : groupRecommendations) {
            if (Double.isNaN(recommendation.getPreference().doubleValue())) {
                continue;
            }
            int idItem = recommendation.getItem().getId();
            for (int idUser : groupOfUsers.getIdMembers()) {
                if (groupTrueRatings.get(idUser).containsKey(idItem)) {
                    double trueRating = groupTrueRatings.get(idUser).get(idItem).getRatingValue().doubleValue();
                    double predicted = recommendation.getPreference().doubleValue();
                    double absoluteError = Math.abs(predicted - trueRating);

                    maeGroup.addValue(absoluteError);
                }
            }
        }

        maeGroups.put(groupOfUsers, maeGroup);

    }

    double[] maesByGroup = maeGroups.values().parallelStream().mapToDouble(maeGroup -> maeGroup.getMean())
            .filter(value -> !Double.isNaN(value)).toArray();

    double maeByGroupStdDev = new StandardDeviation().evaluate(maesByGroup);

    if (maesByGroup.length == 0) {
        return new GroupEvaluationMeasureResult(this, Double.NaN);
    } else {
        return new GroupEvaluationMeasureResult(this, maeByGroupStdDev);
    }
}