Example usage for java.util TreeMap values

Introduction

This page collects real-world usage examples for java.util.TreeMap.values(), drawn from open-source projects.

Prototype

public Collection<V> values() 

Document

Returns a Collection view of the values contained in this map.
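
Note that values() returns a live view, not a copy: iteration follows the map's ascending key order, later changes to the map show up in the collection, and removing an element from the collection removes the corresponding mapping. A minimal self-contained sketch of this behavior (not taken from any of the projects below; the class name is just for illustration):

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.TreeMap;

public class TreeMapValuesDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> map = new TreeMap<>();
        map.put("banana", 2);
        map.put("apple", 1);
        map.put("cherry", 3);

        // Iteration follows ascending key order: apple, banana, cherry.
        Collection<Integer> values = map.values();
        System.out.println(values); // [1, 2, 3]

        // The collection is a live view, so later insertions are visible.
        map.put("date", 4);
        System.out.println(values); // [1, 2, 3, 4]

        // Removing from the view removes the mapping from the map.
        values.remove(Integer.valueOf(2));
        System.out.println(map.containsKey("banana")); // false

        // A pattern used repeatedly below: copy the values into a new
        // list to get a sorted snapshot that is detached from the map.
        List<Integer> snapshot = new ArrayList<>(map.values());
        System.out.println(snapshot); // [1, 3, 4]
    }
}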

Usage

From source file:io.apiman.manager.api.rest.impl.OrganizationResourceImpl.java
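
This example accumulates members in a TreeMap keyed by user ID, so values() yields the MemberBean objects already sorted by user ID when copying them into the result list.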

/**
 * @see io.apiman.manager.api.rest.contract.IOrganizationResource#listMembers(java.lang.String)
 */
@Override
public List<MemberBean> listMembers(String organizationId)
        throws OrganizationNotFoundException, NotAuthorizedException {
    get(organizationId); // validates that the organization exists (throws OrganizationNotFoundException otherwise)

    try {
        Set<RoleMembershipBean> memberships = query.getOrgMemberships(organizationId);
        TreeMap<String, MemberBean> members = new TreeMap<>();
        storage.beginTx();
        for (RoleMembershipBean membershipBean : memberships) {
            String userId = membershipBean.getUserId();
            String roleId = membershipBean.getRoleId();
            RoleBean role = storage.getRole(roleId);

            // Role does not exist!
            if (role == null) {
                continue;
            }

            MemberBean member = members.get(userId);
            if (member == null) {
                UserBean user = storage.getUser(userId);
                member = new MemberBean();
                member.setEmail(user.getEmail());
                member.setUserId(userId);
                member.setUserName(user.getFullName());
                member.setRoles(new ArrayList<>());
                members.put(userId, member);
            }
            MemberRoleBean mrb = new MemberRoleBean();
            mrb.setRoleId(roleId);
            mrb.setRoleName(role.getName());
            member.getRoles().add(mrb);
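            // Keep the earliest membership creation date as the member's join date.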
            if (member.getJoinedOn() == null
                    || membershipBean.getCreatedOn().compareTo(member.getJoinedOn()) < 0) {
                member.setJoinedOn(membershipBean.getCreatedOn());
            }
        }
        return new ArrayList<>(members.values());
    } catch (StorageException e) {
        throw new SystemErrorException(e);
    } finally {
        storage.rollbackTx();
    }
}

From source file:org.opennms.netmgt.config.WmiPeerFactory.java
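
Here specifics and ranges are sorted in TreeMaps ordered by an InetAddressComparator; after the merging passes, values() supplies the sorted, combined elements that are written back into each Definition.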

/**
 * Combine specific and range elements so that WMIPeerFactory has to spend
 * less time iterating all these elements.
 * TODO This really should be pulled up into PeerFactory somehow, but I'm not sure how
 * (given that "Definition" is different for SNMP and WMI). Maybe some sort of visitor
 * methodology would work. The basic logic should be fine, as it's all IP address manipulation.
 *
 * @throws UnknownHostException
 */
static void optimize() throws UnknownHostException {

    // First pass: Remove empty definition elements
    for (Iterator<Definition> definitionsIterator = m_config.getDefinitionCollection()
            .iterator(); definitionsIterator.hasNext();) {
        Definition definition = definitionsIterator.next();
        if (definition.getSpecificCount() == 0 && definition.getRangeCount() == 0) {

            LOG.debug("optimize: Removing empty definition element");
            definitionsIterator.remove();
        }
    }

    // Second pass: Replace single IP range elements with specific elements
    for (Definition definition : m_config.getDefinitionCollection()) {
        synchronized (definition) {
            for (Iterator<Range> rangesIterator = definition.getRangeCollection().iterator(); rangesIterator
                    .hasNext();) {
                Range range = rangesIterator.next();
                if (range.getBegin().equals(range.getEnd())) {
                    definition.addSpecific(range.getBegin());
                    rangesIterator.remove();
                }
            }
        }
    }

    // Third pass: Sort specific and range elements for improved XML
    // readability and then combine them into fewer elements where possible
    for (Iterator<Definition> defIterator = m_config.getDefinitionCollection().iterator(); defIterator
            .hasNext();) {
        Definition definition = defIterator.next();

        // Sort specifics
        final TreeMap<InetAddress, String> specificsMap = new TreeMap<InetAddress, String>(
                new InetAddressComparator());
        for (String specific : definition.getSpecificCollection()) {
            specificsMap.put(InetAddressUtils.getInetAddress(specific), specific.trim());
        }

        // Sort ranges
        final TreeMap<InetAddress, Range> rangesMap = new TreeMap<InetAddress, Range>(
                new InetAddressComparator());
        for (Range range : definition.getRangeCollection()) {
            rangesMap.put(InetAddressUtils.getInetAddress(range.getBegin()), range);
        }

        // Combine consecutive specifics into ranges
        InetAddress priorSpecific = null;
        Range addedRange = null;
        for (final InetAddress specific : specificsMap.keySet()) {
            if (priorSpecific == null) {
                priorSpecific = specific;
                continue;
            }

            if (BigInteger.ONE.equals(InetAddressUtils.difference(specific, priorSpecific))
                    && InetAddressUtils.inSameScope(specific, priorSpecific)) {
                if (addedRange == null) {
                    addedRange = new Range();
                    addedRange.setBegin(InetAddressUtils.toIpAddrString(priorSpecific));
                    rangesMap.put(priorSpecific, addedRange);
                    specificsMap.remove(priorSpecific);
                }

                addedRange.setEnd(InetAddressUtils.toIpAddrString(specific));
                specificsMap.remove(specific);
            } else {
                addedRange = null;
            }

            priorSpecific = specific;
        }

        // Move specifics to ranges
        for (final InetAddress specific : new ArrayList<InetAddress>(specificsMap.keySet())) {
            for (final InetAddress begin : new ArrayList<InetAddress>(rangesMap.keySet())) {
                if (!InetAddressUtils.inSameScope(begin, specific)) {
                    continue;
                }

                if (InetAddressUtils.toInteger(begin).subtract(BigInteger.ONE)
                        .compareTo(InetAddressUtils.toInteger(specific)) > 0) {
                    continue;
                }

                Range range = rangesMap.get(begin);

                final InetAddress end = InetAddressUtils.getInetAddress(range.getEnd());

                if (InetAddressUtils.toInteger(end).add(BigInteger.ONE)
                        .compareTo(InetAddressUtils.toInteger(specific)) < 0) {
                    continue;
                }

                if (InetAddressUtils.toInteger(specific).compareTo(InetAddressUtils.toInteger(begin)) >= 0
                        && InetAddressUtils.toInteger(specific)
                                .compareTo(InetAddressUtils.toInteger(end)) <= 0) {
                    specificsMap.remove(specific);
                    break;
                }

                if (InetAddressUtils.toInteger(begin).subtract(BigInteger.ONE)
                        .equals(InetAddressUtils.toInteger(specific))) {
                    rangesMap.remove(begin);
                    rangesMap.put(specific, range);
                    range.setBegin(InetAddressUtils.toIpAddrString(specific));
                    specificsMap.remove(specific);
                    break;
                }

                if (InetAddressUtils.toInteger(end).add(BigInteger.ONE)
                        .equals(InetAddressUtils.toInteger(specific))) {
                    range.setEnd(InetAddressUtils.toIpAddrString(specific));
                    specificsMap.remove(specific);
                    break;
                }
            }
        }

        // Combine consecutive ranges
        Range priorRange = null;
        InetAddress priorBegin = null;
        InetAddress priorEnd = null;
        for (final Iterator<InetAddress> rangesIterator = rangesMap.keySet().iterator(); rangesIterator
                .hasNext();) {
            final InetAddress beginAddress = rangesIterator.next();
            final Range range = rangesMap.get(beginAddress);
            final InetAddress endAddress = InetAddressUtils.getInetAddress(range.getEnd());

            if (priorRange != null) {
                if (InetAddressUtils.inSameScope(beginAddress, priorEnd)
                        && InetAddressUtils.difference(beginAddress, priorEnd).compareTo(BigInteger.ONE) <= 0) {
                    priorBegin = new InetAddressComparator().compare(priorBegin, beginAddress) < 0 ? priorBegin
                            : beginAddress;
                    priorRange.setBegin(InetAddressUtils.toIpAddrString(priorBegin));
                    priorEnd = new InetAddressComparator().compare(priorEnd, endAddress) > 0 ? priorEnd
                            : endAddress;
                    priorRange.setEnd(InetAddressUtils.toIpAddrString(priorEnd));

                    rangesIterator.remove();
                    continue;
                }
            }

            priorRange = range;
            priorBegin = beginAddress;
            priorEnd = endAddress;
        }

        // Write the sorted and combined elements back into the definition
        definition.setSpecific(specificsMap.values().toArray(new String[0]));
        definition.setRange(rangesMap.values().toArray(new Range[0]));
    }
}

From source file:com.irccloud.android.activity.MainActivity.java
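
This example clones a TreeMap of events keyed by eid and iterates values() to bump each user's last_mention timestamp for highlighted messages.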

@Override
public void onBufferSelected(int bid) {
    launchBid = -1;
    launchURI = null;
    cidToOpen = -1;
    bufferToOpen = null;
    setIntent(new Intent(this, MainActivity.class));

    if (suggestionsTimerTask != null)
        suggestionsTimerTask.cancel();
    sortedChannels = null;
    sortedUsers = null;

    if (drawerLayout != null) {
        drawerLayout.closeDrawers();
    }
    if (bid != -1 && conn != null && conn.getUserInfo() != null) {
        conn.getUserInfo().last_selected_bid = bid;
    }
    for (int i = 0; i < backStack.size(); i++) {
        if (buffer != null && backStack.get(i) == buffer.bid) {
            backStack.remove(i);
            i--; // adjust the index after removal so the next entry isn't skipped
        }
    }
    if (buffer != null && buffer.bid >= 0 && bid != buffer.bid) {
        backStack.add(0, buffer.bid);
        buffer.draft = messageTxt.getText().toString();
    }
    // Fade only when switching from a valid buffer to a different one.
    shouldFadeIn = buffer != null && buffer.bid != -1 && buffer.cid != -1 && buffer.bid != bid;
    buffer = BuffersDataSource.getInstance().getBuffer(bid);
    if (buffer != null) {
        Crashlytics.log(Log.DEBUG, "IRCCloud",
                "Buffer selected: cid" + buffer.cid + " bid" + bid + " shouldFadeIn: " + shouldFadeIn);
        server = ServersDataSource.getInstance().getServer(buffer.cid);

        try {
            TreeMap<Long, EventsDataSource.Event> events = EventsDataSource.getInstance()
                    .getEventsForBuffer(buffer.bid);
            if (events != null) {
                events = (TreeMap<Long, EventsDataSource.Event>) events.clone();
                for (EventsDataSource.Event e : events.values()) {
                    if (e != null && e.highlight && e.from != null) {
                        UsersDataSource.User u = UsersDataSource.getInstance().getUser(buffer.bid, e.from);
                        if (u != null && u.last_mention < e.eid)
                            u.last_mention = e.eid;
                    }
                }
            }
        } catch (Exception e) {
            Crashlytics.logException(e);
        }

        try {
            if (Build.VERSION.SDK_INT >= 16 && buffer != null && server != null) {
                NfcAdapter nfc = NfcAdapter.getDefaultAdapter(this);
                if (nfc != null) {
                    String uri = "irc";
                    if (server.ssl > 0)
                        uri += "s";
                    uri += "://" + server.hostname + ":" + server.port;
                    if (buffer.type.equals("channel")) {
                        uri += "/" + URLEncoder.encode(buffer.name, "UTF-8");
                        ChannelsDataSource.Channel c = ChannelsDataSource.getInstance()
                                .getChannelForBuffer(buffer.bid);
                        if (c != null && c.hasMode("k"))
                            uri += "," + c.paramForMode("k");
                    }
                    nfc.setNdefPushMessage(new NdefMessage(NdefRecord.createUri(uri)), this);
                }
            }
        } catch (Exception e) {
            // NFC push is best-effort; any failure setting the push message is ignored.
        }
    } else {
        Crashlytics.log(Log.DEBUG, "IRCCloud",
                "Buffer selected but not found: bid" + bid + " shouldFadeIn: " + shouldFadeIn);
        server = null;
    }
    update_subtitle();
    final Bundle b = new Bundle();
    if (buffer != null)
        b.putInt("cid", buffer.cid);
    b.putInt("bid", bid);
    b.putBoolean("fade", shouldFadeIn);
    BuffersListFragment blf = (BuffersListFragment) getSupportFragmentManager()
            .findFragmentById(R.id.BuffersList);
    final MessageViewFragment mvf = (MessageViewFragment) getSupportFragmentManager()
            .findFragmentById(R.id.messageViewFragment);
    UsersListFragment ulf = (UsersListFragment) getSupportFragmentManager()
            .findFragmentById(R.id.usersListFragment);
    UsersListFragment ulf2 = (UsersListFragment) getSupportFragmentManager()
            .findFragmentById(R.id.usersListFragment2);
    if (mvf != null)
        mvf.ready = false;
    if (blf != null)
        blf.setSelectedBid(bid);
    if (ulf != null)
        ulf.setArguments(b);
    if (ulf2 != null)
        ulf2.setArguments(b);

    if (shouldFadeIn) {
        Crashlytics.log(Log.DEBUG, "IRCCloud", "Fade Out");
        if (Build.VERSION.SDK_INT < 16) {
            AlphaAnimation anim = new AlphaAnimation(1, 0);
            anim.setDuration(150);
            anim.setFillAfter(true);
            anim.setAnimationListener(new Animation.AnimationListener() {
                @Override
                public void onAnimationStart(Animation animation) {

                }

                @Override
                public void onAnimationEnd(Animation animation) {
                    if (mvf != null)
                        mvf.setArguments(b);
                    messageTxt.setText("");
                    if (buffer != null && buffer.draft != null)
                        messageTxt.append(buffer.draft);
                }

                @Override
                public void onAnimationRepeat(Animation animation) {

                }
            });
            try {
                mvf.getListView().startAnimation(anim);
                ulf.getListView().startAnimation(anim);
            } catch (Exception e) {

            }
        } else {
            mvf.getListView().animate().alpha(0).withEndAction(new Runnable() {
                @Override
                public void run() {
                    if (mvf != null)
                        mvf.setArguments(b);
                    messageTxt.setText("");
                    if (buffer != null && buffer.draft != null)
                        messageTxt.append(buffer.draft);
                }
            });
            ulf.getListView().animate().alpha(0);
        }
        mvf.showSpinner(true);
    } else {
        if (mvf != null)
            mvf.setArguments(b);
        messageTxt.setText("");
        if (buffer != null && buffer.draft != null)
            messageTxt.append(buffer.draft);
    }

    updateUsersListFragmentVisibility();
    supportInvalidateOptionsMenu();
    if (excludeBIDTask != null)
        excludeBIDTask.cancel(true);
    excludeBIDTask = new ExcludeBIDTask();
    excludeBIDTask.execute(bid);
    if (drawerLayout != null)
        new RefreshUpIndicatorTask().execute((Void) null);
    if (buffer != null && buffer.cid != -1) {
        if (drawerLayout != null) {
            drawerLayout.setDrawerLockMode(DrawerLayout.LOCK_MODE_UNLOCKED, Gravity.LEFT);
            getSupportActionBar().setHomeButtonEnabled(true);
        }
    }
    update_suggestions(false);
}

From source file:org.apache.geode.internal.cache.GemFireCacheImpl.java
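
Iterating values() of the name-sorted region map gives a stable order in which to append each region and its colocated children.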

private LinkedHashMap<String, PartitionedRegion> orderByColocation(TreeMap<String, PartitionedRegion> prMap) {
    LinkedHashMap<String, PartitionedRegion> orderedPrMap = new LinkedHashMap<>();
    for (PartitionedRegion pr : prMap.values()) {
        addColocatedChildRecursively(orderedPrMap, pr);
    }
    return orderedPrMap;
}

From source file:org.apache.geode.internal.cache.GemFireCacheImpl.java
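
values() is used twice here: first to scan the sorted region map for colocated children, then to group the regions under their root names into per-root TreeMaps.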

private TreeMap<String, Map<String, PartitionedRegion>> getPRTrees() {
    // prTree will save a sublist of PRs who are under the same root
    TreeMap<String, Map<String, PartitionedRegion>> prTrees = new TreeMap<>();
    TreeMap<String, PartitionedRegion> prMap = getPartitionedRegionMap();
    boolean hasColocatedRegion = false;
    for (PartitionedRegion pr : prMap.values()) {
        List<PartitionedRegion> childlist = ColocationHelper.getColocatedChildRegions(pr);
        if (childlist != null && childlist.size() > 0) {
            hasColocatedRegion = true;
            break;
        }
    }

    if (hasColocatedRegion) {
        LinkedHashMap<String, PartitionedRegion> orderedPrMap = orderByColocation(prMap);
        prTrees.put("ROOT", orderedPrMap);
    } else {
        for (PartitionedRegion pr : prMap.values()) {
            String rootName = pr.getRoot().getName();
            TreeMap<String, PartitionedRegion> prSubMap = (TreeMap<String, PartitionedRegion>) prTrees
                    .get(rootName);
            if (prSubMap == null) {
                prSubMap = new TreeMap<>();
                prTrees.put(rootName, prSubMap);
            }
            prSubMap.put(pr.getFullPath(), pr);
        }
    }

    return prTrees;
}

From source file:architecture.user.spring.controller.SecureUserMgmtDataController.java
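
Two TreeMaps with custom comparators keep users and groups sorted case-insensitively by name; values() then feeds the sorted permission sets into the response lists.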

@RequestMapping(value = "/mgmt/permissions/list.json", method = { RequestMethod.POST })
@ResponseBody
public Map<String, Object> listAllPermissions(@RequestBody PermsSetForm permsSetGroup) {
    User currentUser = SecurityHelper.getUser();

    List<PermSet> list1 = new ArrayList<PermSet>();
    List<PermSet> list2 = new ArrayList<PermSet>();
    List<UserPermSet> list3 = new ArrayList<UserPermSet>();
    List<GroupPermSet> list4 = new ArrayList<GroupPermSet>();
    TreeMap<User, UserPermSet> tree1 = new TreeMap<User, UserPermSet>(new Comparator<User>() {
        public int compare(User o1, User o2) {
            return o1.getUsername().toLowerCase().compareTo(o2.getUsername().toLowerCase());
        }
    });
    TreeMap<Group, GroupPermSet> tree2 = new TreeMap<Group, GroupPermSet>(new Comparator<Group>() {
        public int compare(Group o1, Group o2) {
            return o1.getName().toLowerCase().compareTo(o2.getName().toLowerCase());
        }
    });
    PermissionsManagerHelper helper = getPermissionsManagerHelper(permsSetGroup.getObjectType(),
            permsSetGroup.getObjectId());

    for (String permissionName : permsSetGroup.getPerms()) {
        long permission;
        try {
            // PermissionAtom.valueOf throws IllegalArgumentException for unknown names
            // (it never returns null, so the original null check could not reach its else branch).
            permission = Permissions.PermissionAtom.valueOf(permissionName).getAtomId();
        } catch (IllegalArgumentException notAnAtom) {
            permission = permissionsManager.getPermissionMask(permissionName);
        }

        log.debug("permission:" + permissionName + "(" + permission + ")");

        // anonymous
        PermSet p1 = new PermSet(permissionName);
        p1.setAdditive(helper.anonymousUserHasPermission(PermissionType.ADDITIVE, permission));
        p1.setNegative(helper.anonymousUserHasPermission(PermissionType.NEGATIVE, permission));
        p1.setInherited(false);
        list1.add(p1);
        // member
        PermSet p2 = new PermSet(permissionName);
        p2.setAdditive(helper.registeredUserHasPermission(PermissionType.ADDITIVE, permission));
        p2.setNegative(helper.registeredUserHasPermission(PermissionType.NEGATIVE, permission));
        p2.setInherited(false);
        list2.add(p2);

        // users

        log.debug("user : " + helper.usersWithPermissionCount(PermissionType.ADDITIVE, permission));
        for (User user : helper.usersWithPermission(PermissionType.ADDITIVE, permission)) {
            if (tree1.containsKey(user)) {
                UserPermSet up = tree1.get(user);
                up.getPermSet(permissionName, true).setAdditive(true);
            } else {
                UserPermSet up = new UserPermSet(user);
                up.getPermSet(permissionName, true).setAdditive(true);
                tree1.put(user, up);
            }
        }
        for (User user : helper.usersWithPermission(PermissionType.NEGATIVE, permission)) {
            if (tree1.containsKey(user)) {
                UserPermSet up = tree1.get(user);
                up.getPermSet(permissionName, true).setNegative(true);
            } else {
                UserPermSet up = new UserPermSet(user);
                up.getPermSet(permissionName, true).setNegative(true);
                tree1.put(user, up);
            }
        }

        // groups
        log.debug("group : " + helper.groupsWithPermissionCount(PermissionType.ADDITIVE, permission));

        for (Group group : helper.groupsWithPermission(PermissionType.ADDITIVE, permission)) {
            if (tree2.containsKey(group)) {
                GroupPermSet gp = tree2.get(group);
                gp.getPermSet(permissionName, true).setAdditive(true);
            } else {
                GroupPermSet gp = new GroupPermSet(group);
                gp.getPermSet(permissionName, true).setAdditive(true);
                tree2.put(group, gp);
            }
        }
        for (Group group : helper.groupsWithPermission(PermissionType.NEGATIVE, permission)) {
            if (tree2.containsKey(group)) {
                GroupPermSet gp = tree2.get(group);
                gp.getPermSet(permissionName, true).setNegative(true);
            } else {
                GroupPermSet gp = new GroupPermSet(group);
                gp.getPermSet(permissionName, true).setNegative(true);
                tree2.put(group, gp);
            }
        }

    }

    list3.addAll(tree1.values());
    list4.addAll(tree2.values());

    Map<String, Object> map = new HashMap<String, Object>();
    map.put("anonymous", list1);
    map.put("member", list2);
    map.put("users", list3);
    map.put("groups", list4);
    return map;
}

From source file:org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.java
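
Candidate stripes are keyed by end row in a TreeMap; when the loaded files turn out to be inconsistent, values() walks every tentative stripe so its files can be pushed down to level 0.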

/**
 * Loads initial store files that were picked up from some physical location pertaining to
 * this store (presumably). Unlike adding files after compaction, assumes empty initial
 * sets, and is forgiving with regard to stripe constraints - at worst, many/all files will
 * go to level 0.
 * @param storeFiles Store files to add.
 */
private void loadUnclassifiedStoreFiles(List<StoreFile> storeFiles) {
    LOG.debug("Attempting to load " + storeFiles.size() + " store files.");
    TreeMap<byte[], ArrayList<StoreFile>> candidateStripes = new TreeMap<byte[], ArrayList<StoreFile>>(
            MAP_COMPARATOR);
    ArrayList<StoreFile> level0Files = new ArrayList<StoreFile>();
    // Separate the files into tentative stripes; then validate. Currently, we rely on metadata.
    // If needed, we could dynamically determine the stripes in future.
    for (StoreFile sf : storeFiles) {
        byte[] startRow = startOf(sf), endRow = endOf(sf);
        // Validate the range and put the files into place.
        if (isInvalid(startRow) || isInvalid(endRow)) {
            insertFileIntoStripe(level0Files, sf); // No metadata - goes to L0.
            ensureLevel0Metadata(sf);
        } else if (!isOpen(startRow) && !isOpen(endRow) && nonOpenRowCompare(startRow, endRow) >= 0) {
            LOG.error("Unexpected metadata - start row [" + Bytes.toString(startRow) + "], end row ["
                    + Bytes.toString(endRow) + "] in file [" + sf.getPath() + "], pushing to L0");
            insertFileIntoStripe(level0Files, sf); // Bad metadata - goes to L0 also.
            ensureLevel0Metadata(sf);
        } else {
            ArrayList<StoreFile> stripe = candidateStripes.get(endRow);
            if (stripe == null) {
                stripe = new ArrayList<StoreFile>();
                candidateStripes.put(endRow, stripe);
            }
            insertFileIntoStripe(stripe, sf);
        }
    }
    // Possible improvement - for variable-count stripes, if all the files are in L0, we can
    // instead create single, open-ended stripe with all files.

    boolean hasOverlaps = false;
    byte[] expectedStartRow = null; // first stripe can start wherever
    Iterator<Map.Entry<byte[], ArrayList<StoreFile>>> entryIter = candidateStripes.entrySet().iterator();
    while (entryIter.hasNext()) {
        Map.Entry<byte[], ArrayList<StoreFile>> entry = entryIter.next();
        ArrayList<StoreFile> files = entry.getValue();
        // Validate the file start rows, and remove the bad ones to level 0.
        for (int i = 0; i < files.size(); ++i) {
            StoreFile sf = files.get(i);
            byte[] startRow = startOf(sf);
            if (expectedStartRow == null) {
                expectedStartRow = startRow; // ensure that first stripe is still consistent
            } else if (!rowEquals(expectedStartRow, startRow)) {
                hasOverlaps = true;
                LOG.warn("Store file doesn't fit into the tentative stripes - expected to start at ["
                        + Bytes.toString(expectedStartRow) + "], but starts at [" + Bytes.toString(startRow)
                        + "], to L0 it goes");
                StoreFile badSf = files.remove(i);
                insertFileIntoStripe(level0Files, badSf);
                ensureLevel0Metadata(badSf);
                --i;
            }
        }
        // If the candidate stripe still has files, keep it; otherwise drop the empty entry.
        byte[] endRow = entry.getKey();
        if (!files.isEmpty()) {
            expectedStartRow = endRow; // Next stripe must start exactly at that key.
        } else {
            entryIter.remove();
        }
    }

    // In the end, there must be open ends on two sides. If not, and there were no errors i.e.
    // files are consistent, they might be coming from a split. We will treat the boundaries
    // as open keys anyway, and log the message.
    // If there were errors, we'll play it safe and dump everything into L0.
    if (!candidateStripes.isEmpty()) {
        StoreFile firstFile = candidateStripes.firstEntry().getValue().get(0);
        boolean isOpen = isOpen(startOf(firstFile)) && isOpen(candidateStripes.lastKey());
        if (!isOpen) {
            LOG.warn("The range of the loaded files does not cover full key space: from ["
                    + Bytes.toString(startOf(firstFile)) + "], to ["
                    + Bytes.toString(candidateStripes.lastKey()) + "]");
            if (!hasOverlaps) {
                ensureEdgeStripeMetadata(candidateStripes.firstEntry().getValue(), true);
                ensureEdgeStripeMetadata(candidateStripes.lastEntry().getValue(), false);
            } else {
                LOG.warn("Inconsistent files, everything goes to L0.");
                for (ArrayList<StoreFile> files : candidateStripes.values()) {
                    for (StoreFile sf : files) {
                        insertFileIntoStripe(level0Files, sf);
                        ensureLevel0Metadata(sf);
                    }
                }
                candidateStripes.clear();
            }
        }
    }

    // Copy the results into the fields.
    State state = new State();
    state.level0Files = ImmutableList.copyOf(level0Files);
    state.stripeFiles = new ArrayList<ImmutableList<StoreFile>>(candidateStripes.size());
    state.stripeEndRows = new byte[Math.max(0, candidateStripes.size() - 1)][];
    ArrayList<StoreFile> newAllFiles = new ArrayList<StoreFile>(level0Files);
    int i = candidateStripes.size() - 1;
    for (Map.Entry<byte[], ArrayList<StoreFile>> entry : candidateStripes.entrySet()) {
        state.stripeFiles.add(ImmutableList.copyOf(entry.getValue()));
        newAllFiles.addAll(entry.getValue());
        if (i > 0) {
            state.stripeEndRows[state.stripeFiles.size() - 1] = entry.getKey();
        }
        --i;
    }
    state.allFilesCached = ImmutableList.copyOf(newAllFiles);
    this.state = state;
    debugDumpState("Files loaded");
}

From source file:com.irccloud.android.fragment.MessageViewFragment.java
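
Here values() iterates the buffer's events in ascending eid order, so backlog messages are inserted into the adapter chronologically.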

private synchronized void refresh(MessageAdapter adapter, TreeMap<Long, EventsDataSource.Event> events) {
    synchronized (adapterLock) {
        hiddenMap = null;
        expandMap = null;

        if (getActivity() != null)
            textSize = PreferenceManager.getDefaultSharedPreferences(getActivity()).getInt("textSize",
                    getActivity().getResources().getInteger(R.integer.default_text_size));
        timestamp_width = -1;
        if (conn.getReconnectTimestamp() == 0)
            conn.cancel_idle_timer(); //This may take a while...
        collapsedEvents.clear();
        currentCollapsedEid = -1;
        lastCollapsedDay = -1;

        if (events == null || (events.size() == 0 && buffer != null && buffer.min_eid > 0)) {
            if (buffer != null && conn != null && conn.getState() == NetworkConnection.STATE_CONNECTED) {
                requestingBacklog = true;
                runOnUiThread(new Runnable() {
                    @Override
                    public void run() {
                        conn.request_backlog(buffer.cid, buffer.bid, 0);
                    }
                });
            } else {
                runOnUiThread(new Runnable() {
                    @Override
                    public void run() {
                        headerView.setVisibility(View.GONE);
                        backlogFailed.setVisibility(View.GONE);
                        loadBacklogButton.setVisibility(View.GONE);
                    }
                });
            }
        } else if (events.size() > 0) {
            if (server != null) {
                ignore.setIgnores(server.ignores);
            } else {
                ignore.setIgnores(null);
            }
            collapsedEvents.setServer(server);
            earliest_eid = events.firstKey();
            if (events.firstKey() > buffer.min_eid && buffer.min_eid > 0 && conn != null
                    && conn.getState() == NetworkConnection.STATE_CONNECTED) {
                runOnUiThread(new Runnable() {
                    @Override
                    public void run() {
                        headerView.setVisibility(View.VISIBLE);
                        backlogFailed.setVisibility(View.GONE);
                        loadBacklogButton.setVisibility(View.GONE);
                    }
                });
            } else {
                runOnUiThread(new Runnable() {
                    @Override
                    public void run() {
                        headerView.setVisibility(View.GONE);
                        backlogFailed.setVisibility(View.GONE);
                        loadBacklogButton.setVisibility(View.GONE);
                    }
                });
            }
            if (events.size() > 0) {
                avgInsertTime = 0;
                //Debug.startMethodTracing("refresh");
                long start = System.currentTimeMillis();
                Iterator<EventsDataSource.Event> i = events.values().iterator();
                EventsDataSource.Event next = i.next();
                Calendar calendar = Calendar.getInstance();
                while (next != null) {
                    EventsDataSource.Event e = next;
                    next = i.hasNext() ? i.next() : null;
                    String type = (next == null) ? "" : next.type;

                    if (next != null && currentCollapsedEid != -1
                            && !expandedSectionEids.contains(currentCollapsedEid)
                            && (type.equalsIgnoreCase("joined_channel")
                                    || type.equalsIgnoreCase("parted_channel")
                                    || type.equalsIgnoreCase("nickchange") || type.equalsIgnoreCase("quit")
                                    || type.equalsIgnoreCase("user_channel_mode"))) {
                        calendar.setTimeInMillis(next.eid / 1000);
                        insertEvent(adapter, e, true, calendar.get(Calendar.DAY_OF_YEAR) == lastCollapsedDay);
                    } else {
                        insertEvent(adapter, e, true, false);
                    }
                }
                adapter.insertLastSeenEIDMarker();
                Log.i("IRCCloud", "Backlog rendering took: " + (System.currentTimeMillis() - start) + "ms");
                //Debug.stopMethodTracing();
                avgInsertTime = 0;
                //adapter.notifyDataSetChanged();
            }
        }
        if (conn.getReconnectTimestamp() == 0 && conn.getState() == NetworkConnection.STATE_CONNECTED)
            conn.schedule_idle_timer();
    }
}

From source file:org.apache.geode.internal.cache.GemFireCacheImpl.java
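
values() walks the per-root submaps produced by getPRTrees() so each subtree of partitioned regions can be shut down gracefully, in parallel when more than one root exists.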

public void shutDownAll() {
    if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
        try {
            CacheObserverHolder.getInstance().beforeShutdownAll();
        } finally {
            LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        }
    }
    if (!this.isShutDownAll.compareAndSet(false, true)) {
        // it's already doing shutdown by another thread
        try {
            this.shutDownAllFinished.await();
        } catch (InterruptedException e) {
            logger.debug("Shutdown all interrupted while waiting for another thread to do the shutDownAll");
            Thread.currentThread().interrupt();
        }
        return;
    }
    synchronized (GemFireCacheImpl.class) {
        try {
            boolean testIGE = Boolean.getBoolean("TestInternalGemFireError");

            if (testIGE) {
                InternalGemFireError assErr = new InternalGemFireError(
                        LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString());
                throw assErr;
            }

            // Bug 44031 requires that a multithreaded shutdown-all be grouped by
            // root region. However, the shutDownAllDuringRecovery.conf test revealed
            // that colocated child regions have to be closed first. So check all the
            // PRs: if any has a colocate-with attribute, sort the PRs by colocation
            // relationship and close them sequentially; otherwise group them by root region.
            TreeMap<String, Map<String, PartitionedRegion>> prTrees = getPRTrees();
            if (prTrees.size() > 1 && shutdownAllPoolSize != 1) {
                ExecutorService es = getShutdownAllExecutorService(prTrees.size());
                for (final Map<String, PartitionedRegion> prSubMap : prTrees.values()) {
                    es.execute(new Runnable() {
                        public void run() {
                            ConnectionTable.threadWantsSharedResources();
                            shutdownSubTreeGracefully(prSubMap);
                        }
                    });
                } // for each root
                es.shutdown();
                try {
                    es.awaitTermination(Integer.MAX_VALUE, TimeUnit.SECONDS);
                } catch (InterruptedException e) {
                    logger.debug("Shutdown all interrupted while waiting for PRs to be shutdown gracefully.");
                }

            } else {
                for (final Map<String, PartitionedRegion> prSubMap : prTrees.values()) {
                    shutdownSubTreeGracefully(prSubMap);
                }
            }

            close("Shut down all members", null, false, true);
        } finally {
            this.shutDownAllFinished.countDown();
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.TestHRegion.java
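
The test collects the four daughter regions in a TreeMap keyed by region name, then iterates values() to verify a get against each region in name order.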

/**
 * Splits twice and verifies getting from each of the split regions.
 *
 * @throws Exception
 */
@Test
public void testBasicSplit() throws Exception {
    byte[][] families = { fam1, fam2, fam3 };

    Configuration hc = initSplit();
    // Setting up region
    String method = this.getName();
    this.region = initHRegion(tableName, method, hc, families);

    try {
        LOG.info("" + HBaseTestCase.addContent(region, fam3));
        region.flushcache();
        region.compactStores();
        byte[] splitRow = region.checkSplit();
        assertNotNull(splitRow);
        LOG.info("SplitRow: " + Bytes.toString(splitRow));
        HRegion[] regions = splitRegion(region, splitRow);
        try {
            // Need to open the regions.
            // TODO: Add an 'open' to HRegion... don't do open by constructing
            // instance.
            for (int i = 0; i < regions.length; i++) {
                regions[i] = HRegion.openHRegion(regions[i], null);
            }
            // Assert can get rows out of new regions. Should be able to get first
            // row from first region and the midkey from second region.
            assertGet(regions[0], fam3, Bytes.toBytes(START_KEY));
            assertGet(regions[1], fam3, splitRow);
            // Test I can get scanner and that it starts at right place.
            assertScan(regions[0], fam3, Bytes.toBytes(START_KEY));
            assertScan(regions[1], fam3, splitRow);
            // Now prove can't split regions that have references.
            for (int i = 0; i < regions.length; i++) {
                // Add so much data to this region that we create a store file
                // bigger than one of our unsplittable reference files.
                for (int j = 0; j < 2; j++) {
                    HBaseTestCase.addContent(regions[i], fam3);
                }
                HBaseTestCase.addContent(regions[i], fam2);
                HBaseTestCase.addContent(regions[i], fam1);
                regions[i].flushcache();
            }

            byte[][] midkeys = new byte[regions.length][];
            // To make the regions splittable, force compaction.
            for (int i = 0; i < regions.length; i++) {
                regions[i].compactStores();
                midkeys[i] = regions[i].checkSplit();
            }

            TreeMap<String, HRegion> sortedMap = new TreeMap<String, HRegion>();
            // Split these two daughter regions to get 4 regions. They will
            // split because of the data added above.
            for (int i = 0; i < regions.length; i++) {
                HRegion[] rs = null;
                if (midkeys[i] != null) {
                    rs = splitRegion(regions[i], midkeys[i]);
                    for (int j = 0; j < rs.length; j++) {
                        sortedMap.put(Bytes.toString(rs[j].getRegionName()), HRegion.openHRegion(rs[j], null));
                    }
                }
            }
            LOG.info("Made 4 regions");
            // The splits should have been even. Test I can get some arbitrary row
            // out of each.
            int interval = (LAST_CHAR - FIRST_CHAR) / 3;
            byte[] b = Bytes.toBytes(START_KEY);
            for (HRegion r : sortedMap.values()) {
                assertGet(r, fam3, b);
                b[0] += interval;
            }
        } finally {
            for (int i = 0; i < regions.length; i++) {
                try {
                    regions[i].close();
                } catch (IOException e) {
                    // Ignore.
                }
            }
        }
    } finally {
        HRegion.closeHRegion(this.region);
        this.region = null;
    }
}