Example usage for java.util NavigableMap keySet

List of usage examples for java.util NavigableMap keySet

Introduction

On this page you can find example usages of java.util.NavigableMap#keySet.

Prototype

Set<K> keySet();

Source Link

Document

Returns a Set view of the keys contained in this map.

Usage

From source file:com.google.gwt.emultest.java.util.TreeMapTest.java

public void testLowerEntry() {
    K[] keys = getSortedKeys();
    V[] values = getSortedValues();
    NavigableMap<K, V> map = createNavigableMap();

    // Single-entry map: lowerEntry is null at or below the only key, and
    // returns that entry for any strictly greater key.
    map.put(keys[0], values[0]);
    assertNull(map.lowerEntry(getLessThanMinimumKey()));
    assertNull(map.lowerEntry(keys[0]));
    Entry<K, V> below = map.lowerEntry(keys[1]);
    assertEquals(keys[0], below.getKey());
    assertEquals(values[0], below.getValue());
    Entry<K, V> aboveMax = map.lowerEntry(getGreaterThanMaximumKey());
    assertEquals(keys[0], aboveMax.getKey());
    assertEquals(values[0], aboveMax.getValue());
    // Cross-check the result against the keySet() view.
    assertEquals(map.keySet().toArray()[0], map.lowerEntry(keys[1]).getKey());

    // Two-entry map: the strict predecessor relationships shift accordingly.
    map.put(keys[1], values[1]);
    assertNull(map.lowerEntry(getLessThanMinimumKey()));
    assertNull(map.lowerEntry(keys[0]));
    assertEquals(values[0], map.lowerEntry(keys[1]).getValue());
    Entry<K, V> highest = map.lowerEntry(getGreaterThanMaximumKey());
    verifyEntry(highest);
    assertEquals(keys[1], highest.getKey());

    // A null key must throw NPE unless the implementation supports null keys.
    try {
        map.lowerEntry(null);
        assertTrue("expected exception", useNullKey());
    } catch (NullPointerException e) {
        assertFalse("unexpected NPE", useNullKey());
    }
    map.clear();
    assertNull(map.lowerEntry(keys[1]));
    assertNull(map.lowerEntry(null));
}

From source file:com.google.gwt.emultest.java.util.TreeMapTest.java

public void testFloorEntry() {
    K[] keys = getSortedKeys();
    V[] values = getSortedValues();
    NavigableMap<K, V> map = createNavigableMap();

    // Single-entry map: floorEntry is null below the only key and returns
    // that entry for the key itself or anything greater.
    map.put(keys[0], values[0]);
    assertNull(map.floorEntry(getLessThanMinimumKey()));
    Entry<K, V> exact = map.floorEntry(keys[0]);
    assertEquals(keys[0], exact.getKey());
    assertEquals(values[0], exact.getValue());
    Entry<K, V> above = map.floorEntry(keys[1]);
    assertEquals(keys[0], above.getKey());
    assertEquals(values[0], above.getValue());
    assertEquals(keys[0], map.floorEntry(getGreaterThanMaximumKey()).getKey());
    // Cross-check the result against the keySet() view.
    assertEquals(map.keySet().toArray()[0], map.floorEntry(keys[1]).getKey());

    // Two-entry map: the floor of each present key is the key itself.
    map.put(keys[1], values[1]);
    assertNull(map.floorEntry(getLessThanMinimumKey()));
    assertEquals(keys[0], map.floorEntry(keys[0]).getKey());
    Entry<K, V> second = map.floorEntry(keys[1]);
    verifyEntry(second);
    assertEquals(keys[1], second.getKey());
    assertEquals(values[1], second.getValue());
    assertEquals(keys[1], map.floorEntry(getGreaterThanMaximumKey()).getKey());

    // A null key must throw NPE unless the implementation supports null keys.
    try {
        map.floorEntry(null);
        assertTrue("expected exception", useNullKey());
    } catch (NullPointerException e) {
        assertFalse("unexpected NPE", useNullKey());
    }
    map.clear();
    assertNull(map.floorEntry(keys[1]));
    assertNull(map.floorEntry(null));
}

From source file:net.spfbl.core.Peer.java

/**
 * Returns the reputation keys strictly between {@code begin} and {@code end}
 * (both endpoints excluded), or an empty set when no reputation map exists.
 */
private synchronized Set<String> subSet(String begin, String end) {
    TreeSet<String> result = new TreeSet<String>();
    if (reputationMap2 != null) {
        result.addAll(reputationMap2.subMap(begin, false, end, false).keySet());
    }
    return result;
}

From source file:nz.co.fortytwo.signalk.processor.FullExportProcessor.java

/**
 * Builds a fresh SignalK model holding the data of every active subscription
 * on this websocket session that belongs to the given route.
 */
private SignalKModel createTree(String routeId) {
    SignalKModel temp = SignalKModelFactory.getCleanInstance();
    if (logger.isDebugEnabled())
        logger.debug("subs for ws:" + wsSession + " = " + manager.getSubscriptions(wsSession));
    for (Subscription sub : manager.getSubscriptions(wsSession)) {
        // Skip inactive subscriptions and those belonging to other routes.
        if (sub == null || !sub.isActive() || !routeId.equals(sub.getRouteId())) {
            continue;
        }
        if (logger.isDebugEnabled())
            logger.debug("Found active sub:" + sub);
        for (String path : sub.getSubscribed(null)) {
            NavigableMap<String, Object> node = signalkModel.getSubMap(path);
            if (logger.isDebugEnabled())
                logger.debug("Found node:" + path + " = " + node);
            for (String key : node.keySet()) {
                // Metadata and value-history keys are excluded from the export.
                if (key.contains(".meta.") || key.contains(".values.")) {
                    continue;
                }
                //if(key.contains(".source"))continue;
                //if(key.contains(".$source"))continue;
                Object val = node.get(key);
                if (val != null) {
                    temp.getData().put(key, val);
                }
            }
        }
    }
    return temp;
}

From source file:org.apache.bookkeeper.bookie.InterleavedLedgerStorage.java

/**
 * Scans every ledger in a snapshot of the active-ledger set and verifies that
 * each index-page entry can be located in the entry log, collecting lookup
 * failures as {@code DetectedInconsistency} records.
 *
 * @param rateLimiter optional limiter acquired once per checked entry
 * @return the list of inconsistencies found (empty when storage is consistent)
 * @throws IOException if an unexpected error aborts the scan
 */
@Override
public List<DetectedInconsistency> localConsistencyCheck(Optional<RateLimiter> rateLimiter) throws IOException {
    long checkStart = MathUtils.nowInNano();
    LOG.info("Starting localConsistencyCheck");
    long checkedLedgers = 0;
    long checkedPages = 0;
    // Mutable holders so the entry-scan lambda below can update the counters.
    final MutableLong checkedEntries = new MutableLong(0);
    final MutableLong pageRetries = new MutableLong(0);
    // Snapshot: the set of active ledgers may change while we scan.
    NavigableMap<Long, Boolean> bkActiveLedgersSnapshot = activeLedgers.snapshot();
    final List<DetectedInconsistency> errors = new ArrayList<>();
    for (Long ledger : bkActiveLedgersSnapshot.keySet()) {
        try (LedgerCache.PageEntriesIterable pages = ledgerCache.listEntries(ledger)) {
            for (LedgerCache.PageEntries page : pages) {
                @Cleanup // Lombok: releases the page when this iteration's scope ends
                LedgerEntryPage lep = page.getLEP();
                MutableBoolean retry = new MutableBoolean(false);
                // Re-scan the page when it was concurrently modified under us.
                do {
                    retry.setValue(false);
                    // Version observed before the scan; compared again on a lookup
                    // failure to tell real inconsistencies from concurrent updates.
                    int version = lep.getVersion();

                    MutableBoolean success = new MutableBoolean(true);
                    long start = MathUtils.nowInNano();
                    lep.getEntries((entry, offset) -> {
                        rateLimiter.ifPresent(RateLimiter::acquire);

                        try {
                            entryLogger.checkEntry(ledger, entry, offset);
                            checkedEntries.increment();
                        } catch (EntryLogger.EntryLookupException e) {
                            if (version != lep.getVersion()) {
                                // Page changed mid-scan: not necessarily an
                                // inconsistency; retry unless the ledger is gone.
                                pageRetries.increment();
                                if (lep.isDeleted()) {
                                    LOG.debug("localConsistencyCheck: ledger {} deleted", ledger);
                                } else {
                                    LOG.debug("localConsistencyCheck: concurrent modification, retrying");
                                    retry.setValue(true);
                                    retryCounter.inc();
                                }
                                return false; // abandon the stale page scan
                            } else {
                                errors.add(new DetectedInconsistency(ledger, entry, e));
                                LOG.error("Got error: ", e);
                            }
                            success.setValue(false);
                        }
                        return true;
                    });

                    if (success.booleanValue()) {
                        pageScanStats.registerSuccessfulEvent(MathUtils.elapsedNanos(start),
                                TimeUnit.NANOSECONDS);
                    } else {
                        pageScanStats.registerFailedEvent(MathUtils.elapsedNanos(start), TimeUnit.NANOSECONDS);
                    }
                } while (retry.booleanValue());
                checkedPages++;
            }
        } catch (NoLedgerException | FileInfo.FileInfoDeletedException e) {
            // Ledger disappeared: only an error if it is still marked active.
            if (activeLedgers.containsKey(ledger)) {
                LOG.error("Cannot find ledger {}, should exist, exception is ", ledger, e);
                errors.add(new DetectedInconsistency(ledger, -1, e));
            } else {
                LOG.debug("ledger {} deleted since snapshot taken", ledger);
            }
        } catch (Exception e) {
            throw new IOException("Got other exception in localConsistencyCheck", e);
        }
        checkedLedgers++;
    }
    LOG.info(
            "Finished localConsistencyCheck, took {}s to scan {} ledgers, {} pages, "
                    + "{} entries with {} retries, {} errors",
            TimeUnit.NANOSECONDS.toSeconds(MathUtils.elapsedNanos(checkStart)), checkedLedgers, checkedPages,
            checkedEntries.longValue(), pageRetries.longValue(), errors.size());

    return errors;
}

From source file:org.apache.hadoop.hbase.client.HTable.java

/**
 * Gets the starting and ending row keys for every region in the currently
 * open table.
 * <p>
 * This is mainly useful for the MapReduce integration.
 * @return Pair of arrays of region starting and ending row keys
 * @throws IOException if a remote or network exception occurs
 */
public Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
    NavigableMap<HRegionInfo, ServerName> regions = getRegionLocations();
    final List<byte[]> starts = new ArrayList<byte[]>(regions.size());
    final List<byte[]> ends = new ArrayList<byte[]>(regions.size());

    // Collect both boundaries of every region in the map's sort order.
    for (HRegionInfo region : regions.keySet()) {
        starts.add(region.getStartKey());
        ends.add(region.getEndKey());
    }

    byte[][] startArray = starts.toArray(new byte[starts.size()][]);
    byte[][] endArray = ends.toArray(new byte[ends.size()][]);
    return new Pair<byte[][], byte[][]>(startArray, endArray);
}

From source file:org.apache.hadoop.hbase.master.balancer.LocalityAwareLoadBalancer.java

/**
 * This implements the Locality Aware Load Balancer.
 * Information for the algorithm can be found here: https://issues.apache.org/jira/browse/HBASE-10075
 *
 * @param clusterMap Map of regionservers and their load/region information to
 *                   a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterMap) {
    long startTime = System.currentTimeMillis();

    ClusterLoadState cs = new ClusterLoadState(clusterMap);

    float average = cs.getLoadAverage(); // for logging
    int ceiling = (int) Math.ceil(average * (1 + slop));
    NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();

    if (!this.needsBalance(cs)) {
        return null;
    }

    // Additional check for the locality-aware balancer: act only when some
    // server is actually above the ceiling.
    if (!(cs.getMaxLoad() > ceiling)) {
        return null;
    }

    Cluster cluster = new Cluster(clusterMap, new HashMap<String, Deque<RegionLoad>>(), regionLocationFinder);
    int numRegions = cs.getNumRegions();

    LOG.info(" ####################################################################################");
    LOG.info(" Before Locality-aware Balancing");
    LOG.info(" Average=" + average + " Ceiling=" + ceiling + " slop=" + slop);

    // allTableRegionNumberMap: table index -> total number of regions of that
    // table across every server.
    Map<Integer, Integer> allTableRegionNumberMap = new HashMap<Integer, Integer>();
    for (int i = 0; i < cluster.numServers; ++i) {
        for (int j = 0; j < cluster.numTables; ++j) {
            Integer current = allTableRegionNumberMap.get(j);
            int onThisServer = cluster.numRegionsPerServerPerTable[i][j];
            allTableRegionNumberMap.put(j, current == null ? onThisServer : current + onThisServer);
        }
    }

    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> serverEntry : serversByLoad.entrySet()) {
        ServerAndLoad server = serverEntry.getKey();
        List<HRegionInfo> hRegionInfos = serverEntry.getValue();
        // Only offload servers whose region count exceeds the ceiling.
        if (hRegionInfos.size() <= ceiling) {
            LOG.debug("Number of HRegions <= ceiling (" + hRegionInfos.size() + " <= " + ceiling + ")");
            continue;
        }
        PriorityQueue<RegionServerRegionAffinity> queue = new PriorityQueue<RegionServerRegionAffinity>();
        int numberOfRegionsToMove = hRegionInfos.size() - ceiling;
        // BUG FIX: the original used integer division (size / numRegions),
        // which truncates to 0 and made this term a constant.
        double regionAffinityNumber = (1 - (double) hRegionInfos.size() / numRegions) * SERVER_BALANCER_WEIGHT;
        // Score every region on this overloaded server.
        for (HRegionInfo hRegionInfo : hRegionInfos) {
            // Do not move the meta region.
            if (hRegionInfo.isMetaRegion()) {
                continue;
            }
            TableName table = hRegionInfo.getTable();
            String tableName = table.getNameAsString();
            int tableIndex = cluster.tablesToIndex.get(tableName);
            int serverIndex = cluster.serversToIndex.get(server.getServerName().getHostAndPort());
            int allTableRegionNumber = allTableRegionNumberMap.get(tableIndex);
            // BUG FIX: floating-point division here as well, plus a guard
            // against a zero total (avoids division by zero / NaN).
            double tableRegionAffinityNumber = allTableRegionNumber == 0 ? 0
                    : (1 - (double) cluster.numRegionsPerServerPerTable[serverIndex][tableIndex]
                            / allTableRegionNumber) * TABLE_BALANCER_WEIGHT;
            float localityIndex = getLocalityIndex(hRegionInfo, server) * LOCALITY_WEIGHT;
            LOG.info("tableRegionaffinity: " + tableRegionAffinityNumber);
            LOG.info("regionAffinityNUmber: " + regionAffinityNumber);
            LOG.info("localityIndex: " + localityIndex);
            double finalAffinity = regionAffinityNumber + tableRegionAffinityNumber + localityIndex
                    + getStickinessWeight(hRegionInfo);
            queue.add(new RegionServerRegionAffinity(server, hRegionInfo, finalAffinity));
            LOG.info("Affinity between server=" + server.getServerName() + " and region="
                    + hRegionInfo.getRegionNameAsString() + " is " + finalAffinity);
        }

        LOG.info("Number of regions to move=" + numberOfRegionsToMove + " All server and region affinities: "
                + queue);

        // Take the top numberOfRegionsToMove regions by affinity.
        List<RegionServerRegionAffinity> listOfRegionsToMove = new ArrayList<RegionServerRegionAffinity>();
        for (int i = 0; i < numberOfRegionsToMove && !queue.isEmpty(); ++i) {
            listOfRegionsToMove.add(queue.poll());
        }

        // For each chosen region, find the destination server it is most affine to.
        for (RegionServerRegionAffinity regionServerRegionAffinity : listOfRegionsToMove) {
            HRegionInfo hRegionInfoToMove = regionServerRegionAffinity.getHRegionInfo();
            ServerAndLoad serverToMove = null;
            // BUG FIX: Double.MIN_VALUE is the smallest *positive* double, so
            // candidates with a negative affinity could never win; use -inf.
            double maxAffinity = Double.NEGATIVE_INFINITY;
            for (Map.Entry<ServerAndLoad, List<HRegionInfo>> candidate : serversByLoad.entrySet()) {
                ServerAndLoad activeServer = candidate.getKey();
                // BUG FIX: the original reused (and clobbered) the outer-loop
                // hRegionInfos variable here; use a dedicated local instead.
                List<HRegionInfo> candidateRegions = candidate.getValue();
                if (activeServer.equals(regionServerRegionAffinity.getServer())) {
                    continue;
                }
                if (candidateRegions.size() >= ceiling) {
                    LOG.debug("Number of HRegions >= ceiling (" + candidateRegions.size() + " >= " + ceiling + ")");
                    continue;
                }
                double candidateServerAffinity =
                        (1 - (double) candidateRegions.size() / numRegions) * SERVER_BALANCER_WEIGHT;
                TableName table = hRegionInfoToMove.getTable();
                String tableNameAsString = table.getNameAsString();
                int serverIndex = cluster.serversToIndex.get(activeServer.getServerName().getHostAndPort());
                double candidateTableAffinity = 0;
                if (cluster.tablesToIndex.containsKey(tableNameAsString)) {
                    Integer tableIndex = cluster.tablesToIndex.get(tableNameAsString);
                    int allTableRegionNumber = allTableRegionNumberMap.get(tableIndex);
                    if (allTableRegionNumber != 0) {
                        candidateTableAffinity = (1
                                - (double) cluster.numRegionsPerServerPerTable[serverIndex][tableIndex]
                                        / allTableRegionNumber)
                                * TABLE_BALANCER_WEIGHT;
                    }
                } else {
                    LOG.error("Table " + tableNameAsString + " not present in cluster.tablesToIndex");
                }
                double finalAffinity = candidateServerAffinity + candidateTableAffinity
                        + getLocalityIndex(hRegionInfoToMove, activeServer) * LOCALITY_WEIGHT
                        + getStickinessWeight(hRegionInfoToMove);
                if (finalAffinity > maxAffinity) {
                    maxAffinity = finalAffinity;
                    serverToMove = activeServer;
                }
            }
            // BUG FIX: every candidate may have been skipped (all at or above
            // the ceiling); the original dereferenced a null serverToMove here.
            if (serverToMove == null) {
                LOG.debug("No eligible destination server found for region "
                        + hRegionInfoToMove.getRegionNameAsString());
                continue;
            }
            regionsToReturn.add(new RegionPlan(hRegionInfoToMove,
                    regionServerRegionAffinity.getServer().getServerName(), serverToMove.getServerName()));
        }
    }

    LOG.info("Returning plan: " + regionsToReturn);

    // Reset previously moved regions and record the new moves.
    previouslyMovedRegions.clear();
    for (RegionPlan regionPlan : regionsToReturn) {
        previouslyMovedRegions.add(regionPlan.getRegionInfo());
    }

    long endTime = System.currentTimeMillis();
    LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving "
            + regionsToReturn.size() + " regions");
    return regionsToReturn;
}

From source file:org.apache.hadoop.hbase.master.MasterFileSystem.java

/**
 * Mark regions in recovering state when distributedLogReplay are set true
 * @param serverNames Set of ServerNames to be replayed wals in order to recover changes contained
 *          in them/*from   ww  w .ja  va  2 s.c  om*/
 * @throws IOException
 */
public void prepareLogReplay(Set<ServerName> serverNames) throws IOException {
    if (!this.distributedLogReplay) {
        return;
    }
    // mark regions in recovering state
    for (ServerName serverName : serverNames) {
        NavigableMap<HRegionInfo, Result> regions = this.getServerUserRegions(serverName);
        if (regions == null) {
            continue;
        }
        try {
            this.splitLogManager.markRegionsRecoveringInZK(serverName, regions.keySet());
        } catch (KeeperException e) {
            throw new IOException(e);
        }
    }
}

From source file:org.apache.kylin.rest.security.MockHTable.java

/**
 * Flattens one row of the in-memory store (family -> qualifier -> timestamp
 * -> value) into a list of KeyValues, keeping at most {@code maxVersions}
 * newest versions per cell and only timestamps within the given range.
 *
 * @param row            the row key the produced KeyValues belong to
 * @param rowdata        nested map holding the row's cells
 * @param timestampStart inclusive lower bound of accepted timestamps
 * @param timestampEnd   inclusive upper bound of accepted timestamps
 * @param maxVersions    maximum number of versions examined per cell
 * @return the matching KeyValues, newest version first within each cell
 */
private static List<KeyValue> toKeyValue(byte[] row,
        NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> rowdata, long timestampStart,
        long timestampEnd, int maxVersions) {
    List<KeyValue> ret = new ArrayList<KeyValue>();
    // Iterate entries directly instead of keySet() + repeated get() lookups.
    for (Map.Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> familyEntry : rowdata.entrySet()) {
        byte[] family = familyEntry.getKey();
        for (Map.Entry<byte[], NavigableMap<Long, byte[]>> qualifierEntry : familyEntry.getValue().entrySet()) {
            byte[] qualifier = qualifierEntry.getKey();
            int versionsAdded = 0;
            // Newest first: walk the timestamps in descending order.
            for (Map.Entry<Long, byte[]> tsToVal : qualifierEntry.getValue().descendingMap().entrySet()) {
                if (versionsAdded++ == maxVersions)
                    break;
                Long timestamp = tsToVal.getKey();
                // NOTE(review): versions outside the timestamp range still
                // consume a version slot — preserved from the original logic.
                if (timestamp < timestampStart)
                    continue;
                if (timestamp > timestampEnd)
                    continue;
                ret.add(new KeyValue(row, family, qualifier, timestamp, tsToVal.getValue()));
            }
        }
    }
    return ret;
}

From source file:org.apache.kylin.rest.service.AclService.java

/**
 * Populates {@code acl} with the access-control entries serialized in the
 * given HBase {@code result}. When {@code sids} is non-null only aces for
 * those sids are loaded; otherwise every ace in the ACL column family is.
 *
 * @param sids   the sids to filter on, or null for all
 * @param result the HBase row holding the serialized aces
 * @param acl    the ACL to attach the deserialized entries to
 * @throws IOException if deserialization of an ace fails
 */
private void genAces(List<Sid> sids, Result result, AclImpl acl)
        throws JsonParseException, JsonMappingException, IOException {
    List<AceInfo> aceInfos = new ArrayList<AceInfo>();
    if (null != sids) {
        // Just return aces in sids
        for (Sid sid : sids) {
            String sidName = null;
            if (sid instanceof PrincipalSid) {
                sidName = ((PrincipalSid) sid).getPrincipal();
            } else if (sid instanceof GrantedAuthoritySid) {
                sidName = ((GrantedAuthoritySid) sid).getGrantedAuthority();
            }
            // BUG FIX: an unrecognized Sid subtype left sidName null, making
            // Bytes.toBytes(sidName) throw NPE; skip such sids instead.
            if (sidName == null) {
                continue;
            }

            AceInfo aceInfo = aceSerializer.deserialize(
                    result.getValue(Bytes.toBytes(AclHBaseStorage.ACL_ACES_FAMILY), Bytes.toBytes(sidName)));
            if (null != aceInfo) {
                aceInfos.add(aceInfo);
            }
        }
    } else {
        // No sid filter: deserialize every ace stored in the ACL family.
        NavigableMap<byte[], byte[]> familyMap = result
                .getFamilyMap(Bytes.toBytes(AclHBaseStorage.ACL_ACES_FAMILY));
        // Iterate the values directly: the qualifier was only used to re-look
        // up the value it maps to.
        for (byte[] serializedAce : familyMap.values()) {
            AceInfo aceInfo = aceSerializer.deserialize(serializedAce);

            if (null != aceInfo) {
                aceInfos.add(aceInfo);
            }
        }
    }

    // Rebuild the ACEs in order; the list index doubles as the ace id.
    List<AccessControlEntry> newAces = new ArrayList<AccessControlEntry>();
    for (int i = 0; i < aceInfos.size(); i++) {
        AceInfo aceInfo = aceInfos.get(i);

        if (null != aceInfo) {
            Sid sid = aceInfo.getSidInfo().isPrincipal() ? new PrincipalSid(aceInfo.getSidInfo().getSid())
                    : new GrantedAuthoritySid(aceInfo.getSidInfo().getSid());
            AccessControlEntry ace = new AccessControlEntryImpl(Long.valueOf(i), acl, sid,
                    aclPermissionFactory.buildFromMask(aceInfo.getPermissionMask()), true, false, false);
            newAces.add(ace);
        }
    }

    this.setAces(acl, newAces);
}