Example usage for java.util NavigableMap put

List of usage examples for java.util NavigableMap put

Introduction

On this page you can find example usage for java.util NavigableMap put.

Prototype

V put(K key, V value);

Document

Associates the specified value with the specified key in this map (optional operation).
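
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects; class and variable names are illustrative) showing what put returns on a NavigableMap:

import java.util.NavigableMap;
import java.util.TreeMap;

public class NavigableMapPutExample {
    public static void main(String[] args) {
        // TreeMap is the standard NavigableMap implementation; keys stay sorted.
        NavigableMap<String, Integer> scores = new TreeMap<String, Integer>();

        // put returns null when the key was not present before...
        Integer previous = scores.put("alice", 10);   // previous == null
        // ...and the previously mapped value when an existing key is overwritten.
        previous = scores.put("alice", 15);           // previous == 10

        scores.put("bob", 7);
        // Navigation methods then see the keys in sorted order.
        System.out.println(scores.firstKey());   // alice
        System.out.println(scores.lastEntry());  // bob=7
    }
}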

Usage

From source file:org.apache.hadoop.hbase.wal.TestFSHLogProvider.java

@Test
public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testLogCleaning"))
            .addFamily(new HColumnDescriptor("row"));
    final HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf("testLogCleaning2"))
            .addFamily(new HColumnDescriptor("row"));
    NavigableMap<byte[], Integer> scopes1 = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes1.put(fam, 0);
    }
    NavigableMap<byte[], Integer> scopes2 = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd2.getFamiliesKeys()) {
        scopes2.put(fam, 0);
    }
    final Configuration localConf = new Configuration(conf);
    localConf.set(WALFactory.WAL_PROVIDER, FSHLogProvider.class.getName());
    final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
        HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        HRegionInfo hri2 = new HRegionInfo(htd2.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        // we want to mix edits from regions, so pick our own identifier.
        final WAL log = wals.getWAL(UNSPECIFIED_REGION, null);

        // Add a single edit and make sure that rolling won't remove the file
        // Before HBASE-3198 it used to delete it
        addEdits(log, hri, htd, 1, scopes1);
        log.rollWriter();
        assertEquals(1, AbstractFSWALProvider.getNumRolledLogFiles(log));

        // See if there's anything wrong with more than 1 edit
        addEdits(log, hri, htd, 2, scopes1);
        log.rollWriter();
        assertEquals(2, FSHLogProvider.getNumRolledLogFiles(log));

        // Now mix edits from 2 regions, still no flushing
        addEdits(log, hri, htd, 1, scopes1);
        addEdits(log, hri2, htd2, 1, scopes2);
        addEdits(log, hri, htd, 1, scopes1);
        addEdits(log, hri2, htd2, 1, scopes2);
        log.rollWriter();
        assertEquals(3, AbstractFSWALProvider.getNumRolledLogFiles(log));

        // Flush the first region, we expect to see the first two files getting
        // archived. We need to append something or writer won't be rolled.
        addEdits(log, hri2, htd2, 1, scopes2);
        log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
        log.completeCacheFlush(hri.getEncodedNameAsBytes());
        log.rollWriter();
        assertEquals(2, AbstractFSWALProvider.getNumRolledLogFiles(log));

        // Flush the second region, which removes all the remaining output files
        // since the oldest was completely flushed and the two others only contain
        // flush information
        addEdits(log, hri2, htd2, 1, scopes2);
        log.startCacheFlush(hri2.getEncodedNameAsBytes(), htd2.getFamiliesKeys());
        log.completeCacheFlush(hri2.getEncodedNameAsBytes());
        log.rollWriter();
        assertEquals(0, AbstractFSWALProvider.getNumRolledLogFiles(log));
    } finally {
        if (wals != null) {
            wals.close();
        }
    }
}

From source file:org.apache.hadoop.hbase.wal.TestFSHLogProvider.java

/**
 * Tests wal archiving by adding data, doing flushing/rolling and checking we archive old logs
 * and also don't archive "live logs" (that is, a log with un-flushed entries).
 * <p>
 * This is what it does:
 * It creates two regions, and does a series of inserts along with log rolling.
 * Whenever a WAL is rolled, HLogBase checks previous wals for archiving. A wal is eligible for
 * archiving if all the regions which have entries in that wal file have flushed past
 * their maximum sequence id in that wal file.
 * <p>
 * @throws IOException
 */
@Test
public void testWALArchiving() throws IOException {
    LOG.debug("testWALArchiving");
    HTableDescriptor table1 = new HTableDescriptor(TableName.valueOf("t1"))
            .addFamily(new HColumnDescriptor("row"));
    HTableDescriptor table2 = new HTableDescriptor(TableName.valueOf("t2"))
            .addFamily(new HColumnDescriptor("row"));
    NavigableMap<byte[], Integer> scopes1 = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : table1.getFamiliesKeys()) {
        scopes1.put(fam, 0);
    }
    NavigableMap<byte[], Integer> scopes2 = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : table2.getFamiliesKeys()) {
        scopes2.put(fam, 0);
    }
    final Configuration localConf = new Configuration(conf);
    localConf.set(WALFactory.WAL_PROVIDER, FSHLogProvider.class.getName());
    final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
    try {
        final WAL wal = wals.getWAL(UNSPECIFIED_REGION, null);
        assertEquals(0, AbstractFSWALProvider.getNumRolledLogFiles(wal));
        HRegionInfo hri1 = new HRegionInfo(table1.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        HRegionInfo hri2 = new HRegionInfo(table2.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        // ensure that we don't split the regions.
        hri1.setSplit(false);
        hri2.setSplit(false);
        // variables to mock region sequenceIds.
        // start with the testing logic: insert a waledit, and roll writer
        addEdits(wal, hri1, table1, 1, scopes1);
        wal.rollWriter();
        // assert that the wal is rolled
        assertEquals(1, AbstractFSWALProvider.getNumRolledLogFiles(wal));
        // add edits in the second wal file, and roll writer.
        addEdits(wal, hri1, table1, 1, scopes1);
        wal.rollWriter();
        // assert that the wal is rolled
        assertEquals(2, AbstractFSWALProvider.getNumRolledLogFiles(wal));
        // add a waledit to table1, and flush the region.
        addEdits(wal, hri1, table1, 3, scopes1);
        flushRegion(wal, hri1.getEncodedNameAsBytes(), table1.getFamiliesKeys());
        // roll log; all old logs should be archived.
        wal.rollWriter();
        assertEquals(0, AbstractFSWALProvider.getNumRolledLogFiles(wal));
        // add an edit to table2, and roll writer
        addEdits(wal, hri2, table2, 1, scopes2);
        wal.rollWriter();
        assertEquals(1, AbstractFSWALProvider.getNumRolledLogFiles(wal));
        // add edits for table1, and roll writer
        addEdits(wal, hri1, table1, 2, scopes1);
        wal.rollWriter();
        assertEquals(2, AbstractFSWALProvider.getNumRolledLogFiles(wal));
        // add edits for table2, and flush hri1.
        addEdits(wal, hri2, table2, 2, scopes2);
        flushRegion(wal, hri1.getEncodedNameAsBytes(), table2.getFamiliesKeys());
        // the log : region-sequenceId map is
        // log1: region2 (unflushed)
        // log2: region1 (flushed)
        // log3: region2 (unflushed)
        // roll the writer; log2 should be archived.
        wal.rollWriter();
        assertEquals(2, AbstractFSWALProvider.getNumRolledLogFiles(wal));
        // flush region2, and all logs should be archived.
        addEdits(wal, hri2, table2, 2, scopes2);
        flushRegion(wal, hri2.getEncodedNameAsBytes(), table2.getFamiliesKeys());
        wal.rollWriter();
        assertEquals(0, AbstractFSWALProvider.getNumRolledLogFiles(wal));
    } finally {
        if (wals != null) {
            wals.close();
        }
    }
}

From source file:org.apache.hadoop.hdfs.util.TestCyclicIteration.java

private static void checkCyclicIteration(int numOfElements) {
    //create a tree map
    final NavigableMap<Integer, Integer> map = new TreeMap<Integer, Integer>();
    final Integer[] integers = new Integer[numOfElements];
    for (int i = 0; i < integers.length; i++) {
        integers[i] = 2 * i;
        map.put(integers[i], integers[i]);
    }
    LOG.info("\n\nintegers=" + Arrays.asList(integers));
    LOG.info("map=" + map);

    //try starting everywhere
    for (int start = -1; start <= 2 * integers.length - 1; start++) {
        //get a cyclic iteration
        final List<Integer> iteration = new ArrayList<Integer>();
        for (Map.Entry<Integer, Integer> e : new CyclicIteration<Integer, Integer>(map, start)) {
            iteration.add(e.getKey());
        }
        LOG.info("start=" + start + ", iteration=" + iteration);

        //verify results
        for (int i = 0; i < integers.length; i++) {
            final int j = ((start + 2) / 2 + i) % integers.length;
            assertEquals("i=" + i + ", j=" + j, iteration.get(i), integers[j]);
        }
    }
}

From source file:org.apache.impala.datagenerator.HBaseTestDataRegionAssigment.java

/**
 * Split the table regions according to splitPoints and pair up adjacent regions to the
 * same server. Each region pair in ([unbound:1,1:3], [3:5,5:7], [7:9,9:unbound])
 * will be on the same server.// www  .j av  a2 s  .c  o m
 * The table must have data loaded and only a single region.
 */
public void performAssigment(String tableName)
        throws IOException, InterruptedException, TableNotFoundException {
    HTableDescriptor[] desc = hbaseAdmin.listTables(tableName);
    if (desc == null || desc.length == 0) {
        throw new TableNotFoundException("Table " + tableName + " not found.");
    }

    if (hbaseAdmin.getTableRegions(tableName.getBytes()).size() == 1) {
        // Split into regions
        // The table has one region only to begin with. The logic of
        // blockUntilRegionSplit requires that the input regionName has performed a split. 
        // If the table has already been split (i.e. regions count > 1), the same split
        // call will be a no-op and this will cause blockUntilRegionSplit to break.
        for (int i = 0; i < splitPoints.length; ++i) {
            hbaseAdmin.majorCompact(tableName);
            List<HRegionInfo> regions = hbaseAdmin.getTableRegions(tableName.getBytes());
            HRegionInfo splitRegion = regions.get(regions.size() - 1);
            int attempt = 1;
            boolean done = false;
            while (!done && attempt < MAX_SPLIT_ATTEMPTS) {
                // HBase seems to not always properly receive/process this split RPC,
                // so we need to retry the split/block several times.
                hbaseAdmin.split(splitRegion.getRegionNameAsString(), splitPoints[i]);
                done = blockUntilRegionSplit(conf, WAIT_FOR_SPLIT_TIMEOUT, splitRegion.getRegionName(), true);
                Thread.sleep(100);
                ++attempt;
            }
            if (!done) {
                throw new IllegalStateException(String.format("Failed to split region '%s' after %s attempts.",
                        splitRegion.getRegionNameAsString(), WAIT_FOR_SPLIT_TIMEOUT));
            }
            LOG.info(String.format("Split region '%s' after %s attempts.", splitRegion.getRegionNameAsString(),
                    attempt));
        }
    }

    // Sort the region by start key
    List<HRegionInfo> regions = hbaseAdmin.getTableRegions(tableName.getBytes());
    Preconditions.checkArgument(regions.size() == splitPoints.length + 1);
    Collections.sort(regions);

    // Pair up two adjacent regions to the same region server. That is, 
    // region server 1 <- regions (unbound:1), (1:3)
    // region server 2 <- regions (3:5), (5:7)
    // region server 3 <- regions (7:9), (9:unbound)
    NavigableMap<HRegionInfo, ServerName> expectedLocs = Maps.newTreeMap();
    for (int i = 0; i < regions.size(); ++i) {
        HRegionInfo regionInfo = regions.get(i);
        int rsIdx = (i / 2) % sortedRS.size();
        ServerName regionServerName = sortedRS.get(rsIdx);
        hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), regionServerName.getServerName().getBytes());
        expectedLocs.put(regionInfo, regionServerName);
    }

    // hbaseAdmin.move() is an asynchronous operation. HBase tests use sleep to wait for
    // the move to complete. It should be done in 10sec.
    int sleepCnt = 0;
    HTable hbaseTable = new HTable(conf, tableName);
    try {
        while (!expectedLocs.equals(hbaseTable.getRegionLocations()) && sleepCnt < 100) {
            Thread.sleep(100);
            ++sleepCnt;
        }
        NavigableMap<HRegionInfo, ServerName> actualLocs = hbaseTable.getRegionLocations();
        Preconditions.checkArgument(expectedLocs.equals(actualLocs));

        // Log the actual region location map
        for (Map.Entry<HRegionInfo, ServerName> entry : actualLocs.entrySet()) {
            LOG.info(printKey(entry.getKey().getStartKey()) + " -> " + entry.getValue().getHostAndPort());
        }

        // Force a major compaction such that the HBase table is backed by deterministic
        // physical artifacts (files, WAL, etc.). Our #rows estimate relies on the sizes of
        // these physical artifacts.
        LOG.info("Major compacting HBase table: " + tableName);
        hbaseAdmin.majorCompact(tableName);
    } finally {
        IOUtils.closeQuietly(hbaseTable);
    }
}

From source file:org.apache.kylin.rest.security.MockHTable.java

private <K, V> V forceFind(NavigableMap<K, V> map, K key, V newObject) {
    V data = map.get(key);
    if (data == null) {
        data = newObject;
        map.put(key, data);
    }
    return data;
}

From source file:org.apache.kylin.rest.security.MockHTable.java

/**
 * {@inheritDoc}
 */
@Override
public void put(Put put) throws IOException {
    byte[] row = put.getRow();
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> rowData = forceFind(data, row,
            new TreeMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>(Bytes.BYTES_COMPARATOR));
    for (byte[] family : put.getFamilyMap().keySet()) {
        if (columnFamilies.contains(new String(family)) == false) {
            throw new RuntimeException("Not Exists columnFamily : " + new String(family));
        }
        NavigableMap<byte[], NavigableMap<Long, byte[]>> familyData = forceFind(rowData, family,
                new TreeMap<byte[], NavigableMap<Long, byte[]>>(Bytes.BYTES_COMPARATOR));
        for (KeyValue kv : put.getFamilyMap().get(family)) {
            kv.updateLatestStamp(Bytes.toBytes(System.currentTimeMillis()));
            byte[] qualifier = kv.getQualifier();
            NavigableMap<Long, byte[]> qualifierData = forceFind(familyData, qualifier,
                    new TreeMap<Long, byte[]>());
            qualifierData.put(kv.getTimestamp(), kv.getValue());
        }
    }
}

From source file:org.apache.metron.enrichment.cli.LatencySummarizer.java

public static void updateStats(LatencyStats stats, Map<String, Object> doc) {
    Map<String, Long> latencyMap = new HashMap<>();
    NavigableMap<Long, String> latencyInvMap = new TreeMap<>();
    for (Map.Entry<String, Object> kv : doc.entrySet()) {
        if (kv.getKey().endsWith(".ts")) {
            String base = getBaseMetric(kv.getKey());
            long latency = Long.parseLong(kv.getValue().toString());
            latencyInvMap.put(latency, base);
            latencyMap.put(base, latency);
        }
    }
    List<String> metrics = new ArrayList<>();
    for (Map.Entry<Long, String> kv : latencyInvMap.entrySet()) {
        metrics.add(kv.getValue());
    }
    stats.updateMetrics(metrics);
    for (int i = 0; i < metrics.size(); ++i) {
        for (int j = i + 1; j < metrics.size(); ++j) {
            Pair p = new Pair(metrics.get(i), metrics.get(j));
            long ms = latencyMap.get(metrics.get(j)) - latencyMap.get(metrics.get(i));
            stats.put(j - i, p, ms);
        }
    }
}
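
Worth noting about the TreeMap usage above: putting latencies in as keys means the subsequent iteration visits them in ascending order, which is what sorts the metrics list; since put replaces the value for an existing key, two metrics with identical latencies would collapse to one entry. A minimal sketch of that ordering (metric names are invented):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class LatencyOrderSketch {
    public static void main(String[] args) {
        NavigableMap<Long, String> latencyInvMap = new TreeMap<Long, String>();
        latencyInvMap.put(250L, "enrichment");
        latencyInvMap.put(40L, "parser");
        latencyInvMap.put(900L, "indexing");

        // Iterating a TreeMap visits keys in ascending order,
        // so the metrics list ends up sorted by latency.
        List<String> metrics = new ArrayList<String>();
        for (Map.Entry<Long, String> kv : latencyInvMap.entrySet()) {
            metrics.add(kv.getValue());
        }
        System.out.println(metrics); // [parser, enrichment, indexing]
    }
}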

From source file:org.interreg.docexplore.datalink.fs2.DeleteFS2PagesAction.java

public void doAction() throws Exception {
    File root = ((DataLinkFS2) link.getLink()).getFile();
    newPageNumbers.clear();
    for (Page page : pages) {
        File pageDir = PageFS2.getPageDir(root, book.getId(), page.getPageNumber());
        FileUtils.moveDirectoryToDirectory(pageDir, cacheDir, true);
    }
    Collection<File> files = orderedPages(BookFS2.getBookDir(root, book.getId()));
    int curNum = 1;
    NavigableMap<Integer, Page> newPagesByNumber = new TreeMap<Integer, Page>();
    for (File file : files) {
        int pageNum = Integer.parseInt(file.getName().substring("page".length()));
        if (curNum != pageNum) {
            file.renameTo(new File(file.getParentFile(), "page" + curNum));
            Page page = book.pagesByNumber.get(pageNum);
            if (page != null)
                page.pageNum = curNum;
            book.pagesByNumber.remove(pageNum);
            newPagesByNumber.put(curNum, page);
        } else
            newPagesByNumber.put(pageNum, book.pagesByNumber.get(pageNum));
        newPageNumbers.put(pageNum, curNum);
        curNum++;
    }
    book.pagesByNumber = newPagesByNumber;
}

From source file:org.jahia.modules.tagcloud.taglibs.TagCloudTag.java

/**
 * Generates the tag cloud associated with the specified bound component, including only tags with a cardinality above the specified minimum cardinality for inclusion, up to
 * the specified maximum number of tags.
 *
 * @param boundComponent                 the component for which we want to generate a tag cloud.
 * @param minimumCardinalityForInclusion minimum cardinality (i.e. number of tagged elements) for a tag to be included in the tag cloud
 * @param maxNumberOfTags                maximum number of tags included in the cloud, keeping most numerous tags first (i.e. tags with lower cardinality will be excluded from
 *                                       the cloud first)
 * @param currentQuery                   the currently applied facet query
 * @param renderContext                  the {@link org.jahia.services.render.RenderContext} in which this tag cloud is being generated
 * @throws RepositoryException if something went wrong accessing the JCR repository while processing the bound component's tags
 */
public void generateTagCloud(JCRNodeWrapper boundComponent, int minimumCardinalityForInclusion,
        int maxNumberOfTags, String currentQuery, RenderContext renderContext) throws RepositoryException {

    // applied facets
    final Map<String, List<KeyValue>> appliedFacets = Functions.getAppliedFacetFilters(currentQuery);

    // query
    QueryResultWrapper filteredTags = getNodesWithFacets(boundComponent, minimumCardinalityForInclusion,
            maxNumberOfTags, appliedFacets);

    if (!filteredTags.isFacetResultsEmpty()) {
        // map recording which unapplied tags have which cardinality, sorted in reverse cardinality order (most numerous tags first, being more important)
        final NavigableMap<Integer, Set<Tag>> tagCounts = new TreeMap<Integer, Set<Tag>>();
        // applied tags facets
        final List<KeyValue> appliedTagsValues = appliedFacets.get(Constants.TAGS);
        // list of applied tags
        List<Tag> appliedTagsList = Collections.emptyList();
        if (appliedTagsValues != null) {
            appliedTagsList = new ArrayList<Tag>(appliedTagsValues.size());
        }

        // action URL start
        final String facetURLParameterName = getFacetURLParameterName(boundComponent.getName());
        final String url = renderContext.getURLGenerator().getMainResource();
        final String actionURLStart = url + "?" + facetURLParameterName + "=";

        // process the query results
        final FacetField tags = filteredTags.getFacetField(Constants.TAGS);
        final List<FacetField.Count> values = tags.getValues();
        int totalCardinality = 0;
        for (FacetField.Count value : values) {
            // facet query should only return tags with a cardinality greater than the one we specified
            final int count = (int) value.getCount();

            // facets return value of the j:tags property which is a weak reference to a node so we need to load it to get its name
            final String tagUUID = value.getName();
            final JCRNodeWrapper tagNode = boundComponent.getSession().getNodeByUUID(tagUUID);
            final String name = tagNode.getDisplayableName();

            // create tag
            final Tag tag = new Tag(name, count, tagUUID, value);

            if (!Functions.isFacetValueApplied(value, appliedFacets)) {
                // only add tag to cloud if it's not applied

                // increase totalCardinality with the current tag's count, this is used to compute the tag's weight in the cloud
                totalCardinality += count;

                // add tag to tag counts
                Set<Tag> associatedTags = tagCounts.get(count);
                if (associatedTags == null) {
                    associatedTags = new HashSet<Tag>();
                    tagCounts.put(count, associatedTags);
                }
                associatedTags.add(tag);
            } else {
                // get KeyValue for current tag
                KeyValue current = null;
                for (KeyValue tagsValue : appliedTagsValues) {
                    if (tagUUID.equals(tagsValue.getKey())) {
                        current = tagsValue;
                        break;
                    }
                }

                tag.setDeleteActionURL(
                        getActionURL(actionURLStart, Functions.getDeleteFacetUrl(current, currentQuery)));
                appliedTagsList.add(tag);
            }
        }
        Tag.setTotalCardinality(totalCardinality);

        // extract only the maxNumberOfTags most numerous tags
        final Map<String, Tag> tagCloud = new LinkedHashMap<String, Tag>(maxNumberOfTags);
        boolean stop = false;
        for (Set<Tag> tags1 : tagCounts.descendingMap().values()) {
            if (stop) {
                break;
            }

            for (Tag tag : tags1) {
                if (tagCloud.size() < maxNumberOfTags) {
                    String result = getActionURL(actionURLStart,
                            Functions.getFacetDrillDownUrl(tag.getFacetValue(), currentQuery));
                    tag.setActionURL(result);
                    tagCloud.put(tag.getName(), tag);
                } else {
                    stop = true;
                    break;
                }
            }
        }

        // put cloud and applied tags in their respective page context variables
        pageContext.setAttribute(cloudVar, tagCloud, PageContext.REQUEST_SCOPE);
        pageContext.setAttribute(appliedTags, appliedTagsList, PageContext.REQUEST_SCOPE);
    }
}
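
A side note on the NavigableMap usage above: the tag counts are keyed by cardinality in a TreeMap so that descendingMap() can walk them from most to least frequent. A minimal, self-contained sketch of that pattern (tag names and counts here are made up):

import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;

public class TagCountSketch {
    public static void main(String[] args) {
        NavigableMap<Integer, Set<String>> tagCounts = new TreeMap<Integer, Set<String>>();
        tagCounts.computeIfAbsent(3, k -> new TreeSet<String>()).add("java");
        tagCounts.computeIfAbsent(3, k -> new TreeSet<String>()).add("hbase");
        tagCounts.computeIfAbsent(7, k -> new TreeSet<String>()).add("hadoop");

        // descendingMap() walks the counts from highest to lowest.
        for (Map.Entry<Integer, Set<String>> entry : tagCounts.descendingMap().entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
        // prints: 7 -> [hadoop]  then  3 -> [hbase, java]
    }
}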

From source file:org.mule.api.routing.AggregationContext.java

/**
 * Returns a {@link NavigableMap} in which the key is a zero-based route index
 * and the value is a {@link Throwable} generated by it. Notice that this is a
 * collect operation. Each time this method is invoked the result will be
 * re-calculated.
 *
 * @return a {@link NavigableMap}. It could be empty but it will never be
 *         <code>null</code>
 */
public NavigableMap<Integer, Throwable> collectRouteExceptions() {
    NavigableMap<Integer, Throwable> routes = new TreeMap<Integer, Throwable>();
    for (int i = 0; i < this.events.size(); i++) {
        MuleEvent event = this.events.get(i);
        if (failedEventsPredicate.evaluate(event)) {
            routes.put(i, event.getMessage().getExceptionPayload().getException());
        }
    }

    return routes;
}
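
As a brief usage note (hypothetical caller code, not part of the Mule source): because the returned map is a NavigableMap keyed by route index, a caller can read the failed routes in index order and pick out the earliest failure directly.

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class RouteExceptionSketch {
    public static void main(String[] args) {
        // Stand-in for the map returned by collectRouteExceptions().
        NavigableMap<Integer, Throwable> routes = new TreeMap<Integer, Throwable>();
        routes.put(2, new IllegalStateException("route 2 failed"));
        routes.put(5, new RuntimeException("route 5 failed"));

        // firstEntry() returns the entry with the lowest route index, or null if the map is empty.
        Map.Entry<Integer, Throwable> first = routes.firstEntry();
        if (first != null) {
            System.out.println("first failure at route " + first.getKey() + ": "
                    + first.getValue().getMessage());
        }
    }
}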