Example usage for java.util.NavigableSet.size()

List of usage examples for java.util.NavigableSet.size()

Introduction

This page lists example usages of java.util.NavigableSet.size() taken from open-source projects.

Prototype

int size();

Document

Returns the number of elements in this set (its cardinality).
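
A minimal, self-contained sketch (class name and elements are illustrative): size() reflects the set's current cardinality, shrinks as navigation-style removals such as pollFirst() take effect, and also applies to the views a NavigableSet exposes, such as headSet().

import java.util.NavigableSet;
import java.util.TreeSet;

public class NavigableSetSizeExample {
    public static void main(String[] args) {
        NavigableSet<String> set = new TreeSet<>();
        set.add("a");
        set.add("b");
        set.add("c");
        System.out.println(set.size());                     // 3
        set.pollFirst();                                    // removes "a"
        System.out.println(set.size());                     // 2
        System.out.println(set.headSet("c", false).size()); // 1 -- only "b" is left below "c"
    }
}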

Usage

From source file:org.apache.hadoop.hbase.client.coprocessor.AggregationClient.java

/**
 * This is the client side interface/handler for calling the median method for a
 * given cf-cq combination. This method collects the necessary parameters
 * to compute the median and returns the median.
 * @param table
 * @param ci
 * @param scan
 * @return R the median
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> R median(final HTable table,
        ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) throws Throwable {
    Pair<NavigableMap<byte[], List<S>>, List<S>> p = getMedianArgs(table, ci, scan);
    byte[] startRow = null;
    byte[] colFamily = scan.getFamilies()[0];
    NavigableSet<byte[]> quals = scan.getFamilyMap().get(colFamily);
    NavigableMap<byte[], List<S>> map = p.getFirst();
    S sumVal = p.getSecond().get(0);
    S sumWeights = p.getSecond().get(1);
    double halfSumVal = ci.divideForAvg(sumVal, 2L);
    double movingSumVal = 0;
    boolean weighted = false;
    if (quals.size() > 1) {
        weighted = true;
        halfSumVal = ci.divideForAvg(sumWeights, 2L);
    }

    for (Map.Entry<byte[], List<S>> entry : map.entrySet()) {
        S s = weighted ? entry.getValue().get(1) : entry.getValue().get(0);
        double newSumVal = movingSumVal + ci.divideForAvg(s, 1L);
        if (newSumVal > halfSumVal)
            break; // we found the region with the median
        movingSumVal = newSumVal;
        startRow = entry.getKey();
    }
    // scan the region with median and find it
    Scan scan2 = new Scan(scan);
    // inherit stop row from method parameter
    if (startRow != null)
        scan2.setStartRow(startRow);
    ResultScanner scanner = null;
    try {
        int cacheSize = scan2.getCaching();
        if (!scan2.getCacheBlocks() || scan2.getCaching() < 2) {
            scan2.setCacheBlocks(true);
            cacheSize = 5;
            scan2.setCaching(cacheSize);
        }
        scanner = table.getScanner(scan2);
        Result[] results = null;
        byte[] qualifier = quals.pollFirst();
        // qualifier for the weight column
        byte[] weightQualifier = weighted ? quals.pollLast() : qualifier;
        R value = null;
        do {
            results = scanner.next(cacheSize);
            if (results != null && results.length > 0) {
                for (int i = 0; i < results.length; i++) {
                    Result r = results[i];
                    // retrieve weight
                    Cell kv = r.getColumnLatest(colFamily, weightQualifier);
                    R newValue = ci.getValue(colFamily, weightQualifier, kv);
                    S s = ci.castToReturnType(newValue);
                    double newSumVal = movingSumVal + ci.divideForAvg(s, 1L);
                    // see if we have moved past the median
                    if (newSumVal > halfSumVal) {
                        return value;
                    }
                    movingSumVal = newSumVal;
                    kv = r.getColumnLatest(colFamily, qualifier);
                    value = ci.getValue(colFamily, qualifier, kv);
                }
            }
        } while (results != null && results.length > 0);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    return null;
}
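
The region-selection loop above can be read on its own: per-region sums are walked in key order, and the walk stops as soon as adding the next region would push the running total past half of the grand total; that region holds the median. Below is a minimal sketch of that partial-sum walk with made-up bucket names and values (the real method additionally remembers the previous key so it can start the follow-up scan there).

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class MedianBucketWalk {
    public static void main(String[] args) {
        // Illustrative per-bucket sums, kept in key order (stands in for the per-region sums).
        NavigableMap<String, Double> perBucketSum = new TreeMap<>();
        perBucketSum.put("bucket-a", 10.0);
        perBucketSum.put("bucket-b", 25.0);
        perBucketSum.put("bucket-c", 15.0);

        double half = perBucketSum.values().stream().mapToDouble(Double::doubleValue).sum() / 2; // 25.0
        double running = 0;
        String medianBucket = null;
        for (Map.Entry<String, Double> e : perBucketSum.entrySet()) {
            if (running + e.getValue() > half) {
                medianBucket = e.getKey(); // the median lies inside this bucket
                break;
            }
            running += e.getValue();
        }
        System.out.println("median lies in " + medianBucket); // bucket-b
    }
}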

From source file:org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient.java

/**
 * This is the client side interface/handler for calling the median method for a
 * given cf-cq combination. This method collects the necessary parameters
 * to compute the median and returns the median.
 * @param table
 * @param ci
 * @param scan
 * @return R the median
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> R median(final long transactionId,
        final TransactionalTable table, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) throws Throwable {
    Pair<NavigableMap<byte[], List<S>>, List<S>> p = getMedianArgs(transactionId, table, ci, scan);
    byte[] startRow = null;
    byte[] colFamily = scan.getFamilies()[0];
    NavigableSet<byte[]> quals = scan.getFamilyMap().get(colFamily);
    NavigableMap<byte[], List<S>> map = p.getFirst();
    S sumVal = p.getSecond().get(0);
    S sumWeights = p.getSecond().get(1);
    double halfSumVal = ci.divideForAvg(sumVal, 2L);
    double movingSumVal = 0;
    boolean weighted = false;
    if (quals.size() > 1) {
        weighted = true;
        halfSumVal = ci.divideForAvg(sumWeights, 2L);
    }

    for (Map.Entry<byte[], List<S>> entry : map.entrySet()) {
        S s = weighted ? entry.getValue().get(1) : entry.getValue().get(0);
        double newSumVal = movingSumVal + ci.divideForAvg(s, 1L);
        if (newSumVal > halfSumVal)
            break; // we found the region with the median
        movingSumVal = newSumVal;
        startRow = entry.getKey();
    }
    // scan the region with median and find it
    Scan scan2 = new Scan(scan);
    // inherit stop row from method parameter
    if (startRow != null)
        scan2.setStartRow(startRow);
    ResultScanner scanner = null;
    try {
        int cacheSize = scan2.getCaching();
        if (!scan2.getCacheBlocks() || scan2.getCaching() < 2) {
            scan2.setCacheBlocks(true);
            cacheSize = 5;
            scan2.setCaching(cacheSize);
        }
        scanner = table.getScanner(scan2);
        Result[] results = null;
        byte[] qualifier = quals.pollFirst();
        // qualifier for the weight column
        byte[] weightQualifier = weighted ? quals.pollLast() : qualifier;
        R value = null;
        do {
            results = scanner.next(cacheSize);
            if (results != null && results.length > 0) {
                for (int i = 0; i < results.length; i++) {
                    Result r = results[i];
                    // retrieve weight
                    Cell kv = r.getColumnLatest(colFamily, weightQualifier);
                    R newValue = ci.getValue(colFamily, weightQualifier, kv);
                    S s = ci.castToReturnType(newValue);
                    double newSumVal = movingSumVal + ci.divideForAvg(s, 1L);
                    // see if we have moved past the median
                    if (newSumVal > halfSumVal) {
                        return value;
                    }
                    movingSumVal = newSumVal;
                    kv = r.getColumnLatest(colFamily, qualifier);
                    value = ci.getValue(colFamily, qualifier, kv);
                }
            }
        } while (results != null && results.length > 0);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    return null;
}

From source file:com.google.gwt.emultest.java.util.TreeSetTest.java

public void testPollLast() {
    NavigableSet<E> set = createNavigableSet();

    assertNull(set.pollLast());
    assertEquals(0, set.size());

    set.add(getKeys()[0]);
    assertEquals(getKeys()[0], set.pollLast());
    assertEquals(0, set.size());

    set.add(getKeys()[0]);
    set.add(getKeys()[1]);
    assertEquals(getKeys()[1], set.pollLast());
    assertEquals(1, set.size());
    assertEquals(getKeys()[0], set.pollLast());
    assertEquals(0, set.size());
    assertNull(set.pollLast());
}

From source file:com.google.gwt.emultest.java.util.TreeSetTest.java

public void testPollFirst() {
    NavigableSet<E> set = createNavigableSet();

    assertNull(set.pollFirst());
    assertEquals(0, set.size());

    set.add(getKeys()[0]);
    assertEquals(getKeys()[0], set.pollFirst());
    assertEquals(0, set.size());

    set.add(getKeys()[0]);
    set.add(getKeys()[1]);
    assertEquals(getKeys()[0], set.pollFirst());
    assertEquals(1, set.size());
    assertEquals(getKeys()[1], set.pollFirst());
    assertEquals(0, set.size());
    assertNull(set.pollFirst());
}

From source file:org.apache.hadoop.hbase.master.TestDistributedLogSplitting.java

HTable installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs, int existingRegions)
        throws Exception {
    // Create a table with regions
    byte[] table = Bytes.toBytes(tname);
    byte[] family = Bytes.toBytes(fname);
    LOG.info("Creating table with " + nrs + " regions");
    HTable ht = TEST_UTIL.createTable(table, family);
    int numRegions = TEST_UTIL.createMultiRegions(conf, ht, family, nrs);
    assertEquals(nrs, numRegions);
    LOG.info("Waiting for no more RIT\n");
    blockUntilNoRIT(zkw, master);
    // disable-enable cycle to get rid of table's dead regions left behind
    // by createMultiRegions
    LOG.debug("Disabling table\n");
    TEST_UTIL.getHBaseAdmin().disableTable(table);
    LOG.debug("Waiting for no more RIT\n");
    blockUntilNoRIT(zkw, master);
    NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    LOG.debug("Verifying only catalog and namespace regions are assigned\n");
    if (regions.size() != 2) {
        for (String oregion : regions)
            LOG.debug("Region still online: " + oregion);
    }
    assertEquals(2 + existingRegions, regions.size());
    LOG.debug("Enabling table\n");
    TEST_UTIL.getHBaseAdmin().enableTable(table);
    LOG.debug("Waiting for no more RIT\n");
    blockUntilNoRIT(zkw, master);
    LOG.debug("Verifying there are " + numRegions + " assigned on cluster\n");
    regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    assertEquals(numRegions + 2 + existingRegions, regions.size());
    return ht;
}

From source file:com.google.gwt.emultest.java.util.TreeSetTest.java

public void testDescendingIterator() {
    NavigableSet<E> set = createNavigableSet();
    set.add(getKeys()[0]);

    Iterator<E> descendingIterator = set.descendingIterator();
    _assertEquals(set, reverseCollection(asCollection(descendingIterator)));

    set.add(getKeys()[1]);
    set.add(getKeys()[2]);
    descendingIterator = set.descendingIterator();
    _assertEquals(set, reverseCollection(asCollection(descendingIterator)));

    descendingIterator = set.descendingIterator();
    while (descendingIterator.hasNext()) {
        descendingIterator.next();
        descendingIterator.remove();
    }
    assertEquals(0, set.size());
}

From source file:com.google.gwt.emultest.java.util.TreeSetTest.java

public void testDescendingSet() {
    NavigableSet<E> set = createNavigableSet();
    set.add(getKeys()[0]);

    NavigableSet<E> descendingSet = set.descendingSet();
    _assertEquals(descendingSet, descendingSet);
    _assertEquals(set.descendingSet(), descendingSet);

    set.add(getKeys()[1]);
    set.add(getKeys()[2]);
    _assertEquals(reverseCollection(set), descendingSet);
    _assertEquals(set, descendingSet.descendingSet());

    set.remove(getKeys()[1]);
    _assertEquals(reverseCollection(set), descendingSet);

    descendingSet.add(getKeys()[0]);
    _assertEquals(reverseCollection(set), descendingSet);

    descendingSet.remove(getKeys()[1]);
    _assertEquals(reverseCollection(set), descendingSet);

    descendingSet.clear();
    assertEquals(0, descendingSet.size());
    _assertEquals(set, descendingSet);
}

From source file:com.inclouds.hbase.rowcache.RowCache.java

/**
 * FIXME - optimize.
 * 
 * @param family
 *          the family
 * @param column
 *          the column
 * @return true, if successful
 */
private boolean shouldSkipColumn(byte[] family, byte[] column) {

    RequestContext context = contextTLS.get();
    Map<byte[], NavigableSet<byte[]>> map = context.getFamilyMap();
    NavigableSet<byte[]> cols = map.get(family);
    if (cols == null || cols.size() == 0)
        return false;
    return !cols.contains(column);

}

From source file:org.apache.hadoop.hbase.regionserver.HRegion.java

/**
 * Read the edits log put under this region by wal log splitting process.  Put
 * the recovered edits back up into this region.
 *
 * <p>We can ignore any log message that has a sequence ID that's equal to or
 * lower than minSeqId.  (Because we know such log messages are already
 * reflected in the HFiles.)
 *
 * <p>While this is running we are putting pressure on memory yet we are
 * outside of our usual accounting because we are not yet an onlined region
 * (this stuff is being run as part of Region initialization).  This means
 * that if we're up against global memory limits, we'll not be flagged to flush
 * because we are not online. We can't be flushed by usual mechanisms anyways;
 * we're not yet online so our relative sequenceids are not yet aligned with
 * HLog sequenceids -- not till we come up online, post processing of split
 * edits.
 *
 * <p>But to help relieve memory pressure, we at least manage our own heap size
 * flushing if we are in excess of per-region limits.  Flushing, though, we have
 * to be careful to avoid using the regionserver/hlog sequenceid.  It's running
 * on a different line to what's going on in here in this region context, so if we
 * crashed replaying these edits, but in the midst had a flush that used the
 * regionserver log with a sequenceid in excess of what's going on in here
 * in this region and with its split editlogs, then we could miss edits the
 * next time we go to recover. So, we have to flush inline, using seqids that
 * make sense in this single region context only -- until we come online.
 *
 * @param maxSeqIdInStores Any edit found in split editlogs needs to be in excess of
 * the maxSeqId for the store to be applied; otherwise it is skipped.
 * @return the sequence id of the last edit added to this region out of the
 * recovered edits log or <code>minSeqId</code> if nothing added from editlogs.
 * @throws UnsupportedEncodingException
 * @throws IOException
 */
protected long replayRecoveredEditsIfAny(final Path regiondir, Map<byte[], Long> maxSeqIdInStores,
        final CancelableProgressable reporter, final MonitoredTask status)
        throws UnsupportedEncodingException, IOException {
    long minSeqIdForTheRegion = -1;
    for (Long maxSeqIdInStore : maxSeqIdInStores.values()) {
        if (maxSeqIdInStore < minSeqIdForTheRegion || minSeqIdForTheRegion == -1) {
            minSeqIdForTheRegion = maxSeqIdInStore;
        }
    }
    long seqid = minSeqIdForTheRegion;

    FileSystem fs = this.fs.getFileSystem();
    NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regiondir);
    if (LOG.isDebugEnabled()) {
        LOG.debug(
                "Found " + (files == null ? 0 : files.size()) + " recovered edits file(s) under " + regiondir);
    }

    if (files == null || files.isEmpty())
        return seqid;

    for (Path edits : files) {
        if (edits == null || !fs.exists(edits)) {
            LOG.warn("Null or non-existent edits file: " + edits);
            continue;
        }
        if (isZeroLengthThenDelete(fs, edits))
            continue;

        long maxSeqId;
        String fileName = edits.getName();
        maxSeqId = Math.abs(Long.parseLong(fileName));
        if (maxSeqId <= minSeqIdForTheRegion) {
            if (LOG.isDebugEnabled()) {
                String msg = "Maximum sequenceid for this log is " + maxSeqId
                        + " and minimum sequenceid for the region is " + minSeqIdForTheRegion
                        + ", skipped the whole file, path=" + edits;
                LOG.debug(msg);
            }
            continue;
        }

        try {
            // replay the edits. Replay can return -1 if everything is skipped, only update if seqId is greater
            seqid = Math.max(seqid, replayRecoveredEdits(edits, maxSeqIdInStores, reporter));
        } catch (IOException e) {
            boolean skipErrors = conf.getBoolean(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS,
                    conf.getBoolean("hbase.skip.errors", HConstants.DEFAULT_HREGION_EDITS_REPLAY_SKIP_ERRORS));
            if (conf.get("hbase.skip.errors") != null) {
                LOG.warn("The property 'hbase.skip.errors' has been deprecated. Please use "
                        + HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS + " instead.");
            }
            if (skipErrors) {
                Path p = HLogUtil.moveAsideBadEditsFile(fs, edits);
                LOG.error(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS + "=true so continuing. Renamed " + edits
                        + " as " + p, e);
            } else {
                throw e;
            }
        }
    }
    // The edits size added into rsAccounting during this replaying will not
    // be required any more. So just clear it.
    if (this.rsAccounting != null) {
        this.rsAccounting.clearRegionReplayEditsSize(this.getRegionName());
    }
    if (seqid > minSeqIdForTheRegion) {
        // Then we added some edits to memory. Flush and cleanup split edit files.
        internalFlushcache(null, seqid, status);
    }
    // Now delete the content of recovered edits.  We're done w/ them.
    for (Path file : files) {
        if (!fs.delete(file, false)) {
            LOG.error("Failed delete of " + file);
        } else {
            LOG.debug("Deleted recovered.edits file=" + file);
        }
    }
    return seqid;
}
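
The skip-by-sequence-id step above is worth isolating: the split edit files arrive as a sorted NavigableSet, each file name parses to the maximum sequence id it contains, and any file whose id is not greater than the region's minimum is skipped. A standalone sketch with invented file names and a made-up minimum sequence id:

import java.util.NavigableSet;
import java.util.TreeSet;

public class RecoveredEditsFilter {
    public static void main(String[] args) {
        // Invented file names; each parses to the maximum sequence id the file covers.
        NavigableSet<String> editFiles = new TreeSet<>();
        editFiles.add("0000000000000000010");
        editFiles.add("0000000000000000025");
        editFiles.add("0000000000000000040");

        long minSeqIdForTheRegion = 25; // assumed minimum already reflected in the stores
        System.out.println("Found " + editFiles.size() + " recovered edits file(s)");
        for (String fileName : editFiles) {
            long maxSeqId = Math.abs(Long.parseLong(fileName));
            if (maxSeqId <= minSeqIdForTheRegion) {
                System.out.println("Skipping " + fileName + " (maxSeqId " + maxSeqId
                        + " <= " + minSeqIdForTheRegion + ")");
                continue;
            }
            System.out.println("Replaying " + fileName);
        }
    }
}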

From source file:org.apache.nifi.cluster.manager.impl.WebClusterManager.java

/**
 * Merges the listing requests in the specified map into the specified listing request
 *
 * @param listingRequest the target listing request
 * @param listingRequestMap the mapping of all responses being merged
 */
private void mergeListingRequests(final ListingRequestDTO listingRequest,
        final Map<NodeIdentifier, ListingRequestDTO> listingRequestMap) {
    final Comparator<FlowFileSummaryDTO> comparator = new Comparator<FlowFileSummaryDTO>() {
        @Override
        public int compare(final FlowFileSummaryDTO dto1, final FlowFileSummaryDTO dto2) {
            int positionCompare = dto1.getPosition().compareTo(dto2.getPosition());
            if (positionCompare != 0) {
                return positionCompare;
            }

            final String address1 = dto1.getClusterNodeAddress();
            final String address2 = dto2.getClusterNodeAddress();
            if (address1 == null && address2 == null) {
                return 0;
            }
            if (address1 == null) {
                return 1;
            }
            if (address2 == null) {
                return -1;
            }
            return address1.compareTo(address2);
        }
    };

    final NavigableSet<FlowFileSummaryDTO> flowFileSummaries = new TreeSet<>(comparator);

    ListFlowFileState state = null;
    int numStepsCompleted = 0;
    int numStepsTotal = 0;
    int objectCount = 0;
    long byteCount = 0;
    boolean finished = true;
    for (final Map.Entry<NodeIdentifier, ListingRequestDTO> entry : listingRequestMap.entrySet()) {
        final NodeIdentifier nodeIdentifier = entry.getKey();
        final String nodeAddress = nodeIdentifier.getApiAddress() + ":" + nodeIdentifier.getApiPort();

        final ListingRequestDTO nodeRequest = entry.getValue();

        numStepsTotal++;
        if (Boolean.TRUE.equals(nodeRequest.getFinished())) {
            numStepsCompleted++;
        }

        final QueueSizeDTO nodeQueueSize = nodeRequest.getQueueSize();
        objectCount += nodeQueueSize.getObjectCount();
        byteCount += nodeQueueSize.getByteCount();

        if (!nodeRequest.getFinished()) {
            finished = false;
        }

        if (nodeRequest.getLastUpdated().after(listingRequest.getLastUpdated())) {
            listingRequest.setLastUpdated(nodeRequest.getLastUpdated());
        }

        // Keep the state with the lowest ordinal value (the "least completed").
        final ListFlowFileState nodeState = ListFlowFileState.valueOfDescription(nodeRequest.getState());
        if (state == null || state.compareTo(nodeState) > 0) {
            state = nodeState;
        }

        if (nodeRequest.getFlowFileSummaries() != null) {
            for (final FlowFileSummaryDTO summaryDTO : nodeRequest.getFlowFileSummaries()) {
                summaryDTO.setClusterNodeId(nodeIdentifier.getId());
                summaryDTO.setClusterNodeAddress(nodeAddress);

                flowFileSummaries.add(summaryDTO);

                // Keep the set from growing beyond our max
                if (flowFileSummaries.size() > listingRequest.getMaxResults()) {
                    flowFileSummaries.pollLast();
                }
            }
        }

        if (nodeRequest.getFailureReason() != null) {
            listingRequest.setFailureReason(nodeRequest.getFailureReason());
        }
    }

    final List<FlowFileSummaryDTO> summaryDTOs = new ArrayList<>(flowFileSummaries);
    listingRequest.setFlowFileSummaries(summaryDTOs);

    final int percentCompleted = (numStepsCompleted * 100) / numStepsTotal;
    listingRequest.setPercentCompleted(percentCompleted);
    listingRequest.setFinished(finished);

    listingRequest.getQueueSize().setByteCount(byteCount);
    listingRequest.getQueueSize().setObjectCount(objectCount);
}