Example usage for java.util NavigableSet size

Introduction

This page collects examples of how java.util.NavigableSet#size() is used in real-world projects.

Prototype

int size();

Documentation

Returns the number of elements in this set (its cardinality).
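Before the project examples below, here is a minimal, self-contained sketch (class name and values invented for illustration) showing what size() reports on a NavigableSet and on one of its views:

import java.util.NavigableSet;
import java.util.TreeSet;

public class NavigableSetSizeDemo {
    public static void main(String[] args) {
        // TreeSet is the standard NavigableSet implementation.
        NavigableSet<String> set = new TreeSet<>();
        set.add("alpha");
        set.add("beta");
        set.add("alpha"); // duplicate; a set keeps only one copy

        // size() returns the number of elements (the set's cardinality).
        System.out.println(set.size()); // 2

        // Views are backed by the set; their size() reflects only the view.
        System.out.println(set.headSet("beta", false).size()); // 1
    }
}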

Usage

From source file:org.apache.kylin.dict.DictionaryManager.java

private String checkDupByContent(DictionaryInfo dictInfo, Dictionary<String> dict) throws IOException {
    ResourceStore store = MetadataManager.getInstance(config).getStore();
    NavigableSet<String> existings = store.listResources(dictInfo.getResourceDir());
    if (existings == null)
        return null;

    logger.info("{} existing dictionaries of the same column", existings.size());
    if (existings.size() > 100) {
        logger.warn("Too many dictionaries under {}, dict count: {}", dictInfo.getResourceDir(),
                existings.size());
    }

    for (String existing : existings) {
        DictionaryInfo existingInfo = getDictionaryInfo(existing);
        if (existingInfo != null && dict.equals(existingInfo.getDictionaryObject())) {
            return existing;
        }
    }

    return null;
}

From source file:org.fedorahosted.freeotp.main.Activity.java

@Override
public void onSelectEvent(NavigableSet<Integer> selected) {
    if (mMenu == null)
        return;

    for (int i = 0; i < mMenu.size(); i++) {
        MenuItem mi = mMenu.getItem(i);

        switch (mi.getItemId()) {
        case R.id.action_about:
            mi.setVisible(selected.size() == 0);
            break;

        case R.id.action_up:
            mi.setVisible(selected.size() > 0);
            mi.setEnabled(!mTokenAdapter.isSelected(0));
            break;

        case R.id.action_down:
            mi.setVisible(selected.size() > 0);
            mi.setEnabled(!mTokenAdapter.isSelected(mTokenAdapter.getItemCount() - 1));
            break;

        case R.id.action_delete:
            mi.setVisible(selected.size() > 0);
            break;

        default:
            break;
        }
    }
}

From source file:com.inclouds.hbase.test.BaseTest.java

/**
* Creates a Get for the given row, restricted by the supplied family map,
* time range, and filter.
*
* @param row the row key
* @param familyMap map from column family to the qualifiers to fetch
* @param tr the time range, or null for all timestamps
* @param f the filter, or null for no filter
* @return the constructed Get
* @throws IOException Signals that an I/O exception has occurred.
*/
protected Get createGet(byte[] row, Map<byte[], NavigableSet<byte[]>> familyMap, TimeRange tr, Filter f)
        throws IOException {
    Get get = new Get(row);
    if (tr != null) {
        get.setTimeRange(tr.getMin(), tr.getMax());
    }
    if (f != null)
        get.setFilter(f);

    if (familyMap != null) {
        for (byte[] fam : familyMap.keySet()) {
            NavigableSet<byte[]> cols = familyMap.get(fam);
            if (cols == null || cols.size() == 0) {
                get.addFamily(fam);
            } else {
                for (byte[] col : cols) {
                    get.addColumn(fam, col);
                }
            }
        }
    }
    return get;
}

From source file:sadl.modellearner.rtiplus.SimplePDRTALearner.java

private int getTolerance(Interval in, int minData, Function<TDoubleList, Integer> f) {
    final NavigableSet<Integer> times = in.getTails().keySet();
    if (times.size() <= 2) {
        return getToleranceFewSlots(in, minData);
    }
    final TDoubleList diffs = new TDoubleArrayList(times.size() - 1);
    final Iterator<Integer> it = times.iterator();
    if (it.hasNext()) {
        int prev = it.next().intValue();
        while (it.hasNext()) {
            final int curr = it.next().intValue();
            diffs.add(curr - prev - 1);
            prev = curr;
        }
    }
    return f.apply(diffs).intValue();
}

From source file:org.apache.hadoop.hbase.client.Scan.java

/**
 * Creates a new instance of this class while copying all values.
 *
 * @param scan  The scan instance to copy from.
 * @throws IOException When copying the values fails.
 */
public Scan(Scan scan) throws IOException {
    startRow = scan.getStartRow();
    stopRow = scan.getStopRow();
    maxVersions = scan.getMaxVersions();
    batch = scan.getBatch();
    storeLimit = scan.getMaxResultsPerColumnFamily();
    storeOffset = scan.getRowOffsetPerColumnFamily();
    caching = scan.getCaching();
    maxResultSize = scan.getMaxResultSize();
    cacheBlocks = scan.getCacheBlocks();
    getScan = scan.isGetScan();
    filter = scan.getFilter(); // clone?
    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
    TimeRange ctr = scan.getTimeRange();
    tr = new TimeRange(ctr.getMin(), ctr.getMax());
    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) {
        byte[] fam = entry.getKey();
        NavigableSet<byte[]> cols = entry.getValue();
        if (cols != null && cols.size() > 0) {
            for (byte[] col : cols) {
                addColumn(fam, col);
            }
        } else {
            addFamily(fam);
        }
    }
    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
        setAttribute(attr.getKey(), attr.getValue());
    }
}

From source file:org.apache.hadoop.hbase.master.LoadBalancer.java

/**
 * Find the block locations for all of the files for the specified region.
 *
 * Returns an ordered list of hosts that are hosting the blocks for this
 * region.  The weight of each host is the sum of the block lengths of all
 * files on that host, so the first host in the list is the server which
 * holds the most bytes of the given region's HFiles.
 *
 * TODO: Make this work.  Need to figure out how to match hadoop's hostnames
 *       given for block locations with our HServerAddress.
 * TODO: Use the right directory for the region
 * TODO: Use getFileBlockLocations on the files not the directory
 *
 * @param fs the filesystem
 * @param region region
 * @return ordered list of hosts holding blocks of the specified region
 * @throws IOException if any filesystem errors
 */
@SuppressWarnings("unused")
private List<String> getTopBlockLocations(FileSystem fs, HRegionInfo region) throws IOException {
    String encodedName = region.getEncodedName();
    Path path = new Path("/hbase/table/" + encodedName);
    FileStatus status = fs.getFileStatus(path);
    BlockLocation[] blockLocations = fs.getFileBlockLocations(status, 0, status.getLen());
    // Key the map by host name so the String lookups below are type-safe;
    // a map keyed by HostAndWeight with HostComparator would throw a
    // ClassCastException when queried with a String host.
    Map<String, HostAndWeight> hostWeights = new TreeMap<String, HostAndWeight>();
    for (BlockLocation bl : blockLocations) {
        String[] hosts = bl.getHosts();
        long len = bl.getLength();
        for (String host : hosts) {
            HostAndWeight haw = hostWeights.get(host);
            if (haw == null) {
                haw = new HostAndWeight(host, len);
                hostWeights.put(host, haw);
            } else {
                haw.addWeight(len);
            }
        }
    }
    NavigableSet<HostAndWeight> orderedHosts = new TreeSet<HostAndWeight>(new HostAndWeight.WeightComparator());
    orderedHosts.addAll(hostWeights.values());
    List<String> topHosts = new ArrayList<String>(orderedHosts.size());
    for (HostAndWeight haw : orderedHosts.descendingSet()) {
        topHosts.add(haw.getHost());
    }
    return topHosts;
}

From source file:org.apache.hadoop.hbase.regionserver.transactional.TrxTransactionState.java

private synchronized Cell[] getAllCells(final Scan scan) {
    //if (LOG.isTraceEnabled()) LOG.trace("getAllCells -- ENTRY");
    List<Cell> kvList = new ArrayList<Cell>();

    ListIterator<WriteAction> writeOrderIter = null;

    for (writeOrderIter = writeOrdering.listIterator(); writeOrderIter.hasNext();) {
        WriteAction action = writeOrderIter.next();
        byte[] row = action.getRow();
        List<Cell> kvs = action.getCells();

        if (scan.getStartRow() != null && !Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW)
                && Bytes.compareTo(row, scan.getStartRow()) < 0) {
            continue;
        }
        if (scan.getStopRow() != null && !Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)
                && Bytes.compareTo(row, scan.getStopRow()) > 0) {
            continue;
        }

        if (!scan.hasFamilies()) {
            kvList.addAll(kvs);
            continue;
        }
        // Pick only the Cells that match the 'scan' specifications
        Map<byte[], NavigableSet<byte[]>> lv_familyMap = scan.getFamilyMap();
        for (Cell lv_kv : kvs) {
            // Clone the family/qualifier bytes: the raw arrays returned by
            // getFamilyArray()/getQualifierArray() are the whole backing buffer,
            // not just the family or qualifier portion.
            byte[] lv_kv_family = CellUtil.cloneFamily(lv_kv);
            NavigableSet<byte[]> set = lv_familyMap.get(lv_kv_family);
            if (set == null || set.size() == 0) {
                kvList.add(lv_kv);
                continue;
            }
            if (set.contains(CellUtil.cloneQualifier(lv_kv))) {
                kvList.add(lv_kv);
            }
        }
    }

    if (LOG.isTraceEnabled())
        LOG.trace("getAllCells -- EXIT kvList size = " + kvList.size());
    return kvList.toArray(new Cell[kvList.size()]);
}

From source file:org.apache.hadoop.hbase.regionserver.transactional.TrxTransactionState.java

private synchronized KeyValue[] getAllKVs(final Scan scan) {
    //if (LOG.isTraceEnabled()) LOG.trace("getAllKVs -- ENTRY");
    List<KeyValue> kvList = new ArrayList<KeyValue>();

    ListIterator<WriteAction> writeOrderIter = null;

    for (writeOrderIter = writeOrdering.listIterator(); writeOrderIter.hasNext();) {
        WriteAction action = writeOrderIter.next();
        byte[] row = action.getRow();
        List<KeyValue> kvs = action.getKeyValues();

        if (scan.getStartRow() != null && !Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW)
                && Bytes.compareTo(row, scan.getStartRow()) < 0) {
            continue;
        }
        if (scan.getStopRow() != null && !Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)
                && Bytes.compareTo(row, scan.getStopRow()) > 0) {
            continue;
        }

        if (!scan.hasFamilies()) {
            kvList.addAll(kvs);
            continue;
        }

        // Pick only the Cells that match the 'scan' specifications
        Map<byte[], NavigableSet<byte[]>> lv_familyMap = scan.getFamilyMap();
        for (KeyValue lv_kv : kvs) {
            byte[] lv_kv_family = lv_kv.getFamily();
            NavigableSet<byte[]> set = lv_familyMap.get(lv_kv_family);
            if (set == null || set.size() == 0) {
                kvList.add(lv_kv);
                continue;
            }
            if (set.contains(lv_kv.getQualifier())) {
                kvList.add(lv_kv);
            }
        }
    }

    if (LOG.isTraceEnabled())
        LOG.trace("getAllKVs -- EXIT kvList size = " + kvList.size());
    return kvList.toArray(new KeyValue[kvList.size()]);
}

From source file:org.apache.hadoop.hbase.regionserver.StoreScanner.java

/** An internal constructor. */
protected StoreScanner(Store store, boolean cacheBlocks, Scan scan, final NavigableSet<byte[]> columns,
        long ttl, int minVersions, long readPt) {
    this.readPt = readPt;
    this.store = store;
    this.cacheBlocks = cacheBlocks;
    isGet = scan.isGetScan();
    int numCol = columns == null ? 0 : columns.size();
    explicitColumnQuery = numCol > 0;
    this.scan = scan;
    this.columns = columns;
    oldestUnexpiredTS = EnvironmentEdgeManager.currentTimeMillis() - ttl;
    this.minVersions = minVersions;

    if (store != null && ((HStore) store).getHRegion() != null
            && ((HStore) store).getHRegion().getBaseConf() != null) {
        this.maxRowSize = ((HStore) store).getHRegion().getBaseConf().getLong(HConstants.TABLE_MAX_ROWSIZE_KEY,
                HConstants.TABLE_MAX_ROWSIZE_DEFAULT);
    } else {
        this.maxRowSize = HConstants.TABLE_MAX_ROWSIZE_DEFAULT;
    }

    // We look up row-column Bloom filters for multi-column queries as part of
    // the seek operation. However, we also look up the row-column Bloom filter
    // for multi-row (non-"get") scans because this is not done in
    // StoreFile.passesBloomFilter(Scan, SortedSet<byte[]>).
    useRowColBloom = numCol > 1 || (!isGet && numCol == 1);
    this.scanUsePread = scan.isSmall();
    // The parallel-seeking is on :
    // 1) the config value is *true*
    // 2) store has more than one store file
    if (store != null && ((HStore) store).getHRegion() != null && store.getStorefilesCount() > 1) {
        RegionServerServices rsService = ((HStore) store).getHRegion().getRegionServerServices();
        if (rsService == null
                || !rsService.getConfiguration().getBoolean(STORESCANNER_PARALLEL_SEEK_ENABLE, false))
            return;
        isParallelSeekEnabled = true;
        executor = rsService.getExecutorService();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.java

/**
 * Constructs a ScanQueryMatcher for a Scan.
 * @param scan
 * @param family
 * @param columns
 * @param ttl
 * @param rowComparator
 */
public ScanQueryMatcher(Scan scan, byte[] family, NavigableSet<byte[]> columns, long ttl,
        KeyValue.KeyComparator rowComparator, int minVersions, int maxVersions, boolean retainDeletesInOutput,
        long readPointToUse) {
    this.tr = scan.getTimeRange();
    this.rowComparator = rowComparator;
    this.deletes = new ScanDeleteTracker();
    this.stopRow = scan.getStopRow();
    this.startKey = KeyValue.createFirstOnRow(scan.getStartRow());
    this.filter = scan.getFilter();
    this.retainDeletesInOutput = retainDeletesInOutput;
    this.maxReadPointToTrackVersions = readPointToUse;

    // Single branch to deal with two types of reads (columns vs all in family)
    if (columns == null || columns.size() == 0) {
        // use a specialized scan for wildcard column tracker.
        if (scan.getCT() != 0) {
            this.columns = new ScanWildcardColumnTracker(minVersions, maxVersions, ttl, scan.getCT(),
                    scan.getQT());
        } else {
            this.columns = new ScanWildcardColumnTracker(minVersions, maxVersions, ttl);
        }
    } else {
        // We can share the ExplicitColumnTracker, diff is we reset
        // between rows, not between storefiles.
        if (scan.getCT() != 0) {
            this.columns = new ExplicitColumnTracker(columns, minVersions, maxVersions, ttl, scan.getCT(),
                    scan.getQT());
        } else {
            this.columns = new ExplicitColumnTracker(columns, minVersions, maxVersions, ttl);
        }
        exactColumnQuery = true;
    }
}