Example usage for java.util NavigableMap get

List of usage examples for java.util NavigableMap get

Introduction

On this page you can find example usage for java.util NavigableMap get.

Prototype

V get(Object key);

Document

Returns the value to which the specified key is mapped, or null if this map contains no mapping for the key.
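
As a quick illustration of this contract, here is a minimal, self-contained sketch (the keys and values are made up for the example) showing get returning a mapped value for a present key and null for an absent one:

import java.util.NavigableMap;
import java.util.TreeMap;

public class NavigableMapGetExample {
    public static void main(String[] args) {
        NavigableMap<String, Integer> map = new TreeMap<>();
        map.put("alpha", 1);
        map.put("beta", 2);

        System.out.println(map.get("alpha")); // prints 1
        System.out.println(map.get("gamma")); // prints null: no mapping for the key
    }
}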

Usage

From source file:com.google.gwt.emultest.java.util.TreeMapTest.java

public void testEntrySet() {
    K[] keys = getSortedKeys();
    V[] values = getSortedValues();
    NavigableMap<K, V> map = createNavigableMap();
    map.put(keys[0], values[0]);
    map.put(keys[1], values[1]);
    map.put(keys[2], values[2]);

    Set<Map.Entry<K, V>> entries = map.entrySet();
    Iterator<Map.Entry<K, V>> entrySetIterator = entries.iterator();
    assertEquals(3, entries.size());
    assertEquals(keys[0] + "=" + values[0], entrySetIterator.next().toString());
    while (entrySetIterator.hasNext()) {
        Map.Entry<K, V> entry = entrySetIterator.next();
        assertTrue(map.get(entry.getKey()) == entry.getValue());
    }

    assertEquals(map.size(), entries.size());
    _assertEquals(entries, map.entrySet());
    map.clear();
    assertEquals(map.size(), entries.size());
    _assertEquals(entries, map.entrySet());
    map.put(keys[0], values[0]);
    assertEquals(map.size(), entries.size());
    _assertEquals(entries, map.entrySet());
    entries.clear();
    assertEquals(map.size(), entries.size());
    _assertEquals(entries, map.entrySet());

    map.put(keys[1], values[1]);
    map.put(keys[2], values[2]);
    Iterator<Entry<K, V>> it = entries.iterator();
    while (it.hasNext()) {
        Map.Entry<K, V> entry = it.next();
        map.containsKey(entry.getKey());
        map.containsValue(entry.getValue());
        it.remove();
    }
    try {
        it.next();
        fail("should throw NoSuchElementException");
    } catch (NoSuchElementException expected) {
    }
    _assertEmpty(map);
}
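
Note that these assertions rely on entrySet() being a live view of the map: changes to the map are reflected in the set and vice versa. A minimal standalone sketch of that view behavior (with made-up keys):

import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;

public class EntrySetViewExample {
    public static void main(String[] args) {
        NavigableMap<String, Integer> map = new TreeMap<>();
        map.put("a", 1);
        Set<Map.Entry<String, Integer>> entries = map.entrySet();

        map.clear();
        System.out.println(entries.size()); // 0: the view tracks the map

        map.put("b", 2);
        System.out.println(entries.size()); // 1

        entries.clear();
        System.out.println(map.size());     // 0: clearing the view clears the map
    }
}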

From source file:com.mirth.connect.server.controllers.DonkeyMessageController.java

private List<MessageSearchResult> searchMessages(MessageFilter filter, String channelId, int offset,
        int limit) {
    long startTime = System.currentTimeMillis();

    FilterOptions filterOptions = new FilterOptions(filter, channelId);
    long maxMessageId = filterOptions.getMaxMessageId();
    long minMessageId = filterOptions.getMinMessageId();

    Long localChannelId = ChannelController.getInstance().getLocalChannelId(channelId);
    Map<String, Object> params = getBasicParameters(filter, localChannelId);

    try {
        NavigableMap<Long, MessageSearchResult> messages = new TreeMap<Long, MessageSearchResult>();
        SqlSession session = SqlConfig.getSqlSessionManager();

        int offsetRemaining = offset;
        /*
         * If the limit is greater than the default batch size, use the limit, but cap it at
         * 50000.
         */
        long batchSize = Math.min(Math.max(limit, 500), 50000);
        long totalSearched = 0;

        while (messages.size() < limit && maxMessageId >= minMessageId) {
            /*
             * Increase the batch size gradually, so that little work is wasted if all
             * the necessary results are found early on.
             */
            if (totalSearched >= 100000 && batchSize < 50000) {
                batchSize = 50000;
            } else if (totalSearched >= 10000 && batchSize < 10000) {
                batchSize = 10000;
            } else if (totalSearched >= 1000 && batchSize < 1000) {
                batchSize = 1000;
            }

            /*
             * Search in descending order so that messages are found from the greatest
             * to the lowest message id.
             */
            long currentMinMessageId = Math.max(maxMessageId - batchSize + 1, minMessageId);
            params.put("maxMessageId", maxMessageId);
            params.put("minMessageId", currentMinMessageId);
            maxMessageId -= batchSize;
            totalSearched += batchSize;

            Map<Long, MessageSearchResult> foundMessages = searchAll(session, params, filter, localChannelId,
                    false, filterOptions);

            if (!foundMessages.isEmpty()) {
                /*
                 * Skip results until there is no offset remaining. This is required when
                 * viewing results beyond the first page.
                 */
                if (offsetRemaining >= foundMessages.size()) {
                    offsetRemaining -= foundMessages.size();
                } else if (offsetRemaining == 0) {
                    messages.putAll(foundMessages);
                } else {
                    NavigableMap<Long, MessageSearchResult> orderedMessages = new TreeMap<Long, MessageSearchResult>(
                            foundMessages);

                    while (offsetRemaining-- > 0) {
                        orderedMessages.pollLastEntry();
                    }

                    messages.putAll(orderedMessages);
                }
            }
        }

        // Remove results beyond the limit requested
        while (messages.size() > limit) {
            messages.pollFirstEntry();
        }

        List<MessageSearchResult> results = new ArrayList<MessageSearchResult>(messages.size());

        /*
         * Now that we have the message and metadata ids that should be returned as the result,
         * we need to retrieve the message data for those.
         */
        if (!messages.isEmpty()) {
            Iterator<Long> iterator = messages.descendingKeySet().iterator();

            while (iterator.hasNext()) {
                Map<String, Object> messageParams = new HashMap<String, Object>();
                messageParams.put("localChannelId", localChannelId);

                ListRangeIterator listRangeIterator = new ListRangeIterator(iterator,
                        ListRangeIterator.DEFAULT_LIST_LIMIT, false, null);

                while (listRangeIterator.hasNext()) {
                    ListRangeItem item = listRangeIterator.next();
                    List<Long> list = item.getList();
                    Long startRange = item.getStartRange();
                    Long endRange = item.getEndRange();

                    if (list != null || (startRange != null && endRange != null)) {
                        if (list != null) {
                            messageParams.remove("minMessageId");
                            messageParams.remove("maxMessageId");
                            messageParams.put("includeMessageList", StringUtils.join(list, ","));
                        } else {
                            messageParams.remove("includeMessageList");
                            messageParams.put("minMessageId", endRange);
                            messageParams.put("maxMessageId", startRange);
                        }

                        // Get the current batch of results
                        List<MessageSearchResult> currentResults = session
                                .selectList("Message.selectMessagesById", messageParams);

                        // Add the metadata ids to each result
                        for (MessageSearchResult currentResult : currentResults) {
                            currentResult.setMetaDataIdSet(
                                    messages.get(currentResult.getMessageId()).getMetaDataIdSet());
                        }

                        // Add the current batch to the final list of results
                        results.addAll(currentResults);
                    }
                }
            }
        }

        return results;
    } finally {
        long endTime = System.currentTimeMillis();
        logger.debug("Search executed in " + (endTime - startTime) + "ms");
    }
}
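
The pollFirstEntry()/pollLastEntry() calls above trim a sorted result set from either end: the lowest ids are dropped to enforce the limit, the highest to consume the page offset. A minimal standalone sketch of the same trimming pattern (the ids and limit are made up):

import java.util.NavigableMap;
import java.util.TreeMap;

public class PollTrimExample {
    public static void main(String[] args) {
        NavigableMap<Long, String> messages = new TreeMap<>();
        for (long id = 1; id <= 10; id++) {
            messages.put(id, "message-" + id);
        }

        int limit = 5;
        // Drop the smallest message ids until only `limit` entries remain
        while (messages.size() > limit) {
            messages.pollFirstEntry();
        }
        System.out.println(messages.firstKey()); // 6
        System.out.println(messages.lastKey());  // 10
    }
}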

From source file:nz.co.fortytwo.signalk.processor.FullExportProcessor.java

private SignalKModel createTree(String routeId) {
    SignalKModel temp = SignalKModelFactory.getCleanInstance();
    if (logger.isDebugEnabled())
        logger.debug("subs for ws:" + wsSession + " = " + manager.getSubscriptions(wsSession));
    for (Subscription sub : manager.getSubscriptions(wsSession)) {
        if (sub != null && sub.isActive() && routeId.equals(sub.getRouteId())) {
            if (logger.isDebugEnabled())
                logger.debug("Found active sub:" + sub);
            for (String p : sub.getSubscribed(null)) {
                NavigableMap<String, Object> node = signalkModel.getSubMap(p);
                if (logger.isDebugEnabled())
                    logger.debug("Found node:" + p + " = " + node);
                for (String key : node.keySet()) {
                    if (key.contains(".meta."))
                        continue;
                    if (key.contains(".values."))
                        continue;
                    //if(key.contains(".source"))continue;
                    //if(key.contains(".$source"))continue;
                    Object val = node.get(key);
                    if (val != null) {
                        temp.getData().put(key, val);
                    }
                }

            }
        }
    }
    return temp;
}
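
getSubMap() is specific to this project's SignalKModel, but the usual way to carve a dotted-path branch out of a String-keyed NavigableMap is a half-open subMap() over the key prefix. A sketch of that technique (the prefix trick is an assumption here, not necessarily how SignalKModel implements it):

import java.util.NavigableMap;
import java.util.TreeMap;

public class PrefixSubMapExample {
    public static void main(String[] args) {
        NavigableMap<String, Object> data = new TreeMap<>();
        data.put("vessels.self.navigation.speed", 5.2);
        data.put("vessels.self.navigation.heading", 180.0);
        data.put("vessels.self.environment.depth", 12.0);

        String prefix = "vessels.self.navigation.";
        // Assumed technique: half-open range covering every key that starts with the prefix
        NavigableMap<String, Object> branch = data.subMap(prefix, true, prefix + Character.MAX_VALUE, false);
        for (String key : branch.keySet()) {
            System.out.println(key + " = " + branch.get(key));
        }
    }
}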

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

/**
 * Get the entry position that comes before the specified position in the message stream, using information from the
 * ledger list and each ledger's entry count.
 *
 * @param position
 *            the current position
 * @return the previous position
 */
PositionImpl getPreviousPosition(PositionImpl position) {
    if (position.getEntryId() > 0) {
        return PositionImpl.get(position.getLedgerId(), position.getEntryId() - 1);
    }

    // The previous position will be the last position of an earlier ledger
    NavigableMap<Long, LedgerInfo> headMap = ledgers.headMap(position.getLedgerId(), false);

    if (headMap.isEmpty()) {
        // There is no previous ledger, return an invalid position in the current ledger
        return PositionImpl.get(position.getLedgerId(), -1);
    }

    // We need to find the most recent non-empty ledger
    for (long ledgerId : headMap.descendingKeySet()) {
        LedgerInfo li = headMap.get(ledgerId);
        if (li.getEntries() > 0) {
            return PositionImpl.get(li.getLedgerId(), li.getEntries() - 1);
        }
    }

    // in case there are only empty ledgers, we return a position in the first one
    return PositionImpl.get(headMap.firstEntry().getKey(), -1);
}
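
The headMap(key, false)/descendingKeySet()/get() combination generalizes to any backwards scan over the keys strictly below a bound. A minimal standalone sketch (the ledger ids and entry counts are made up):

import java.util.NavigableMap;
import java.util.TreeMap;

public class HeadMapScanExample {
    public static void main(String[] args) {
        NavigableMap<Long, Integer> entryCounts = new TreeMap<>();
        entryCounts.put(1L, 0);  // empty ledger
        entryCounts.put(2L, 50); // 50 entries
        entryCounts.put(3L, 0);  // empty ledger

        // Keys strictly less than 3, visited from highest to lowest
        NavigableMap<Long, Integer> head = entryCounts.headMap(3L, false);
        for (long ledgerId : head.descendingKeySet()) {
            if (head.get(ledgerId) > 0) {
                System.out.println("most recent non-empty ledger: " + ledgerId); // 2
                break;
            }
        }
    }
}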

From source file:org.apache.cassandra.dht.tokenallocator.ReplicationAwareTokenAllocatorTest.java

private static double replicatedTokenOwnership(Token token, NavigableMap<Token, Unit> sortedTokens,
        ReplicationStrategy<Unit> strategy) {
    TestReplicationStrategy ts = (TestReplicationStrategy) strategy;
    Token next = sortedTokens.higherKey(token);
    if (next == null)
        next = sortedTokens.firstKey();
    return ts.replicationStart(token, sortedTokens.get(token), sortedTokens).size(next);
}
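
The higherKey()/firstKey() pair implements a circular successor lookup: when there is no key above the given token, the search wraps around to the start of the ring. A minimal sketch with made-up integer tokens:

import java.util.NavigableMap;
import java.util.TreeMap;

public class RingSuccessorExample {
    public static void main(String[] args) {
        NavigableMap<Integer, String> ring = new TreeMap<>();
        ring.put(10, "unitA");
        ring.put(20, "unitB");
        ring.put(30, "unitC");

        Integer next = ring.higherKey(30);
        if (next == null) {
            next = ring.firstKey(); // wrap around the ring
        }
        System.out.println(next); // 10
    }
}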

From source file:org.apache.hadoop.hbase.client.TestFromClientSide.java

@Test
public void testUpdates() throws Exception {

    byte[] TABLE = Bytes.toBytes("testUpdates");
    HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);

    // Write a column with values at timestamp 1, 2 and 3
    byte[] row = Bytes.toBytes("row1");
    byte[] qualifier = Bytes.toBytes("myCol");
    Put put = new Put(row);
    put.add(FAMILY, qualifier, 1L, Bytes.toBytes("AAA"));
    hTable.put(put);

    put = new Put(row);
    put.add(FAMILY, qualifier, 2L, Bytes.toBytes("BBB"));
    hTable.put(put);

    put = new Put(row);
    put.add(FAMILY, qualifier, 3L, Bytes.toBytes("EEE"));
    hTable.put(put);

    Get get = new Get(row);
    get.addColumn(FAMILY, qualifier);
    get.setMaxVersions();

    // Check that the column indeed has the right values at timestamps 1 and 2
    Result result = hTable.get(get);
    NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(qualifier);
    assertEquals("AAA", Bytes.toString(navigableMap.get(1L)));
    assertEquals("BBB", Bytes.toString(navigableMap.get(2L)));

    // Update the value at timestamp 1
    put = new Put(row);
    put.add(FAMILY, qualifier, 1L, Bytes.toBytes("CCC"));
    hTable.put(put);

    // Update the value at timestamp 2
    put = new Put(row);
    put.add(FAMILY, qualifier, 2L, Bytes.toBytes("DDD"));
    hTable.put(put);

    // Check that the values at timestamp 2 and 1 got updated
    result = hTable.get(get);
    navigableMap = result.getMap().get(FAMILY).get(qualifier);
    assertEquals("CCC", Bytes.toString(navigableMap.get(1L)));
    assertEquals("DDD", Bytes.toString(navigableMap.get(2L)));
}

From source file:org.apache.hadoop.hbase.client.TestFromClientSide.java

@Test
public void testUpdatesWithMajorCompaction() throws Exception {

    String tableName = "testUpdatesWithMajorCompaction";
    byte[] TABLE = Bytes.toBytes(tableName);
    HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
    HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());

    // Write a column with values at timestamp 1, 2 and 3
    byte[] row = Bytes.toBytes("row2");
    byte[] qualifier = Bytes.toBytes("myCol");
    Put put = new Put(row);
    put.add(FAMILY, qualifier, 1L, Bytes.toBytes("AAA"));
    hTable.put(put);

    put = new Put(row);
    put.add(FAMILY, qualifier, 2L, Bytes.toBytes("BBB"));
    hTable.put(put);

    put = new Put(row);
    put.add(FAMILY, qualifier, 3L, Bytes.toBytes("EEE"));
    hTable.put(put);

    Get get = new Get(row);
    get.addColumn(FAMILY, qualifier);
    get.setMaxVersions();

    // Check that the column indeed has the right values at timestamps 1 and 2
    Result result = hTable.get(get);
    NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(qualifier);
    assertEquals("AAA", Bytes.toString(navigableMap.get(1L)));
    assertEquals("BBB", Bytes.toString(navigableMap.get(2L)));

    // Trigger a major compaction
    admin.flush(tableName);
    admin.majorCompact(tableName);
    Thread.sleep(6000);

    // Update the value at timestamp 1
    put = new Put(row);
    put.add(FAMILY, qualifier, 1L, Bytes.toBytes("CCC"));
    hTable.put(put);

    // Update the value at timestamp 2
    put = new Put(row);
    put.add(FAMILY, qualifier, 2L, Bytes.toBytes("DDD"));
    hTable.put(put);

    // Trigger a major compaction
    admin.flush(tableName);
    admin.majorCompact(tableName);
    Thread.sleep(6000);

    // Check that the values at timestamp 2 and 1 got updated
    result = hTable.get(get);
    navigableMap = result.getMap().get(FAMILY).get(qualifier);
    assertEquals("CCC", Bytes.toString(navigableMap.get(1L)));
    assertEquals("DDD", Bytes.toString(navigableMap.get(2L)));
}

From source file:org.apache.hadoop.hbase.client.TestFromClientSide.java

@Test
public void testMajorCompactionBetweenTwoUpdates() throws Exception {

    String tableName = "testMajorCompactionBetweenTwoUpdates";
    byte[] TABLE = Bytes.toBytes(tableName);
    HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
    HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());

    // Write a column with values at timestamp 1, 2 and 3
    byte[] row = Bytes.toBytes("row3");
    byte[] qualifier = Bytes.toBytes("myCol");
    Put put = new Put(row);
    put.add(FAMILY, qualifier, 1L, Bytes.toBytes("AAA"));
    hTable.put(put);

    put = new Put(row);
    put.add(FAMILY, qualifier, 2L, Bytes.toBytes("BBB"));
    hTable.put(put);

    put = new Put(row);
    put.add(FAMILY, qualifier, 3L, Bytes.toBytes("EEE"));
    hTable.put(put);

    Get get = new Get(row);
    get.addColumn(FAMILY, qualifier);
    get.setMaxVersions();

    // Check that the column indeed has the right values at timestamps 1 and 2
    Result result = hTable.get(get);
    NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(qualifier);
    assertEquals("AAA", Bytes.toString(navigableMap.get(1L)));
    assertEquals("BBB", Bytes.toString(navigableMap.get(2L)));

    // Trigger a major compaction
    admin.flush(tableName);
    admin.majorCompact(tableName);
    Thread.sleep(6000);

    // Update the value at timestamp 1
    put = new Put(row);
    put.add(FAMILY, qualifier, 1L, Bytes.toBytes("CCC"));
    hTable.put(put);

    // Trigger a major compaction
    admin.flush(tableName);
    admin.majorCompact(tableName);
    Thread.sleep(6000);

    // Update the value at timestamp 2
    put = new Put(row);
    put.add(FAMILY, qualifier, 2L, Bytes.toBytes("DDD"));
    hTable.put(put);

    // Trigger a major compaction
    admin.flush(tableName);
    admin.majorCompact(tableName);
    Thread.sleep(6000);

    // Check that the values at timestamp 2 and 1 got updated
    result = hTable.get(get);
    navigableMap = result.getMap().get(FAMILY).get(qualifier);

    assertEquals("CCC", Bytes.toString(navigableMap.get(1L)));
    assertEquals("DDD", Bytes.toString(navigableMap.get(2L)));
}

From source file:org.apache.hadoop.hbase.client.TestGetRowVersions.java

/** @throws Exception */
public void testGetRowMultipleVersions() throws Exception {
    Put put = new Put(ROW, TIMESTAMP1, null);
    put.add(CONTENTS, CONTENTS, VALUE1);
    HTable table = new HTable(new Configuration(conf), TABLE_NAME);
    table.put(put);
    // Shut down and restart the HBase cluster
    this.cluster.shutdown();
    this.zooKeeperCluster.shutdown();
    LOG.debug("HBase cluster shut down -- restarting");
    this.hBaseClusterSetup();
    // Make a new connection.  Use a new Configuration instance because the old one
    // is tied to an HConnection that has since gone stale.
    table = new HTable(new Configuration(conf), TABLE_NAME);
    // Overwrite previous value
    put = new Put(ROW, TIMESTAMP2, null);
    put.add(CONTENTS, CONTENTS, VALUE2);
    table.put(put);
    // Now verify that getRow(row, column, latest) works
    Get get = new Get(ROW);
    // Should get one version by default
    Result r = table.get(get);
    assertNotNull(r);
    assertFalse(r.isEmpty());
    assertTrue(r.size() == 1);
    byte[] value = r.getValue(CONTENTS, CONTENTS);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, VALUE2));
    // Now check getRow with multiple versions
    get = new Get(ROW);
    get.setMaxVersions();
    r = table.get(get);
    assertTrue(r.size() == 2);
    value = r.getValue(CONTENTS, CONTENTS);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, VALUE2));
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = r.getMap();
    NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap = map.get(CONTENTS);
    NavigableMap<Long, byte[]> versionMap = familyMap.get(CONTENTS);
    assertTrue(versionMap.size() == 2);
    assertTrue(Bytes.equals(VALUE1, versionMap.get(TIMESTAMP1)));
    assertTrue(Bytes.equals(VALUE2, versionMap.get(TIMESTAMP2)));
}
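
Result.getMap() returns a three-level NavigableMap (family to qualifier to timestamp to value), so fetching one version is a chain of get() calls. The same structure can be mimicked with plain TreeMaps, as in this standalone sketch (the names and timestamps are made up):

import java.util.NavigableMap;
import java.util.TreeMap;

public class NestedMapExample {
    public static void main(String[] args) {
        NavigableMap<String, NavigableMap<String, NavigableMap<Long, String>>> map = new TreeMap<>();
        map.computeIfAbsent("contents", f -> new TreeMap<>())
                .computeIfAbsent("contents", q -> new TreeMap<>())
                .put(100L, "value1");
        map.get("contents").get("contents").put(200L, "value2");

        NavigableMap<Long, String> versionMap = map.get("contents").get("contents");
        System.out.println(versionMap.get(100L)); // value1
        System.out.println(versionMap.get(200L)); // value2
    }
}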

From source file:org.apache.hadoop.hbase.coprocessor.example.WriteHeavyIncrementObserver.java

@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get, List<Cell> result)
        throws IOException {
    Scan scan = new Scan().withStartRow(get.getRow()).withStopRow(get.getRow(), true).readAllVersions();
    NavigableMap<byte[], NavigableMap<byte[], MutableLong>> sums = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    get.getFamilyMap().forEach((cf, cqs) -> {
        NavigableMap<byte[], MutableLong> ss = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        sums.put(cf, ss);
        cqs.forEach(cq -> {
            ss.put(cq, new MutableLong(0));
            scan.addColumn(cf, cq);
        });
    });
    List<Cell> cells = new ArrayList<>();
    try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) {
        boolean moreRows;
        do {
            moreRows = scanner.next(cells);
            for (Cell cell : cells) {
                byte[] family = CellUtil.cloneFamily(cell);
                byte[] qualifier = CellUtil.cloneQualifier(cell);
                long value = Bytes.toLong(cell.getValueArray(), cell.getValueOffset());
                sums.get(family).get(qualifier).add(value);
            }
            cells.clear();
        } while (moreRows);
    }
    sums.forEach((cf, m) -> m.forEach((cq, s) -> result
            .add(createCell(get.getRow(), cf, cq, HConstants.LATEST_TIMESTAMP, s.longValue()))));
    c.bypass();
}
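
The nested TreeMaps above are keyed by byte[], which has no natural ordering, so get() only works because an explicit comparator (Bytes.BYTES_COMPARATOR) is supplied at construction. A standalone sketch of the same aggregation pattern, substituting java.util.Arrays::compare for the HBase comparator and a long[] holder for MutableLong:

import java.util.Arrays;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ByteKeyedSumsExample {
    public static void main(String[] args) {
        NavigableMap<byte[], NavigableMap<byte[], long[]>> sums = new TreeMap<>(Arrays::compare);
        byte[] cf = "f".getBytes();
        byte[] cq = "q".getBytes();

        sums.computeIfAbsent(cf, k -> new TreeMap<>(Arrays::compare))
                .computeIfAbsent(cq, k -> new long[1])[0] += 5;

        // Lookup succeeds with different array instances holding equal bytes,
        // because the comparator (not reference identity) drives the search
        System.out.println(sums.get("f".getBytes()).get("q".getBytes())[0]); // 5
    }
}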