Example usage for java.util NavigableSet add

List of usage examples for java.util NavigableSet add

Introduction

This page collects example usages of java.util.NavigableSet.add from real-world source files.

Prototype

boolean add(E e);

Document

Adds the specified element to this set if it is not already present (optional operation).
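
A minimal, self-contained sketch of that contract (plain JDK, with TreeSet as the NavigableSet implementation): add returns true only when the element was absent, and leaves iteration in sorted order.

import java.util.NavigableSet;
import java.util.TreeSet;

public class NavigableSetAddDemo {
    public static void main(String[] args) {
        NavigableSet<String> set = new TreeSet<>();
        System.out.println(set.add("b")); // true: "b" was absent
        System.out.println(set.add("a")); // true: "a" was absent
        System.out.println(set.add("b")); // false: already present, set unchanged
        System.out.println(set);          // [a, b]: iteration is always in sorted order
    }
}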

Usage

From source file:org.apache.hadoop.hbase.regionserver.ccindex.IndexedRegion.java

/** Return the columns needed for the update. */
private NavigableSet<byte[]> getColumnsForIndexes(Collection<IndexSpecification> indexes) {
    NavigableSet<byte[]> neededColumns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    for (IndexSpecification indexSpec : indexes) {
        for (byte[] col : indexSpec.getAllColumns()) {
            neededColumns.add(col);
        }
    }
    return neededColumns;
}
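
Note the explicit Bytes.BYTES_COMPARATOR: arrays do not implement Comparable, so a TreeSet<byte[]> built without a comparator would throw ClassCastException on the first add. A minimal JDK-only sketch of the same pattern (Arrays.compare requires Java 9+), with a lexicographic comparator standing in for HBase's Bytes.BYTES_COMPARATOR:

import java.util.Arrays;
import java.util.Comparator;
import java.util.NavigableSet;
import java.util.TreeSet;

public class ByteColumnSetDemo {
    public static void main(String[] args) {
        // Lexicographic byte[] ordering standing in for Bytes.BYTES_COMPARATOR.
        Comparator<byte[]> lex = (a, b) -> Arrays.compare(a, b);
        NavigableSet<byte[]> cols = new TreeSet<>(lex);
        System.out.println(cols.add(new byte[] { 1, 2 })); // true: newly added
        System.out.println(cols.add(new byte[] { 1, 2 })); // false: equal under the comparator
    }
}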

From source file:org.apache.hadoop.hbase.regionserver.Memcache.java

private void getRowKeyAtOrBefore(final ConcurrentSkipListSet<KeyValue> set, final KeyValue kv,
        final NavigableSet<KeyValue> candidates, final NavigableSet<KeyValue> deletes, final long now) {
    if (set.isEmpty()) {
        return;
    }
    // We want the earliest possible key to start searching from.  Start before
    // the candidate key in case it turns out a delete came in later.
    KeyValue search = candidates.isEmpty() ? kv : candidates.first();

    // Get all the entries that are equal to or come after our search key
    SortedSet<KeyValue> tailset = set.tailSet(search);

    // if there are items in the tail set, there's either a direct match to
    // the search key, or a range of values between the first candidate key
    // and the ultimate search key (or the end of the cache)
    if (!tailset.isEmpty() && this.comparator.compareRows(tailset.first(), search) <= 0) {
        // Keep looking at cells as long as they are no greater than the
        // ultimate search key and there are still records left in the set.
        KeyValue deleted = null;
        KeyValue found = null;
        for (Iterator<KeyValue> iterator = tailset.iterator(); iterator.hasNext()
                && (found == null || this.comparator.compareRows(found, kv) <= 0);) {
            found = iterator.next();
            if (this.comparator.compareRows(found, kv) <= 0) {
                if (found.isDeleteType()) {
                    Store.handleDeletes(found, candidates, deletes);
                    if (deleted == null) {
                        deleted = found;
                    }
                } else {
                    if (Store.notExpiredAndNotInDeletes(this.ttl, found, now, deletes)) {
                        candidates.add(found);
                    } else {
                        if (deleted == null) {
                            deleted = found;
                        }
                        // TODO: Check this removes the right key.
                        // It's expired. Remove it.
                        iterator.remove();
                    }
                }
            }
        }
        if (candidates.isEmpty() && deleted != null) {
            getRowKeyBefore(set, deleted, candidates, deletes, now);
        }
    } else {
        // The tail didn't contain any keys that matched our criteria, or was
        // empty. Examine all the keys that precede our splitting point.
        getRowKeyBefore(set, search, candidates, deletes, now);
    }
}

From source file:org.apache.hadoop.hbase.regionserver.Memcache.java

private void getRowKeyBefore(ConcurrentSkipListSet<KeyValue> set, KeyValue search,
        NavigableSet<KeyValue> candidates, final NavigableSet<KeyValue> deletes, final long now) {
    NavigableSet<KeyValue> headSet = set.headSet(search);
    // If we tried to create a head set and got an empty set, then there are
    // no keys at or before the search key, so we're done.
    if (headSet.isEmpty()) {
        return;
    }

    // If there aren't any candidate keys at this point, we need to search
    // backwards until we find at least one candidate or run out of the head set.
    if (candidates.isEmpty()) {
        KeyValue lastFound = null;
        for (Iterator<KeyValue> i = headSet.descendingIterator(); i.hasNext();) {
            KeyValue found = i.next();
            // if the last row we found a candidate key for is different from
            // the row of the current candidate, we can stop looking -- if it's
            // not a delete record.
            boolean deleted = found.isDeleteType();
            if (lastFound != null && this.comparator.matchingRows(lastFound, found) && !deleted) {
                break;
            }
            // If this isn't a delete, record it as a candidate key. Also 
            // take note of this candidate so that we'll know when
            // we cross the row boundary into the previous row.
            if (!deleted) {
                if (Store.notExpiredAndNotInDeletes(this.ttl, found, now, deletes)) {
                    lastFound = found;
                    candidates.add(found);
                } else {
                    // It's expired.
                    Store.expiredOrDeleted(set, found);
                }
            } else {
                // We are encountering items in reverse.  We may have just added
                // an item to candidates that this later item deletes.  Check.  If we
                // found something in candidates, remove it from the set.
                if (Store.handleDeletes(found, candidates, deletes)) {
                    remove(set, found);
                }
            }
        }
    } else {
        // If there are already some candidate keys, we only need to consider
        // the very last row's worth of keys in the head set, because any
        // smaller acceptable candidate keys would have caused us to start
        // our search earlier in the list, and we wouldn't be searching here.
        SortedSet<KeyValue> rowTailMap = headSet.tailSet(headSet.last().cloneRow(HConstants.LATEST_TIMESTAMP));
        Iterator<KeyValue> i = rowTailMap.iterator();
        do {
            KeyValue found = i.next();
            if (found.isDeleteType()) {
                Store.handleDeletes(found, candidates, deletes);
            } else {
                if (ttl == HConstants.FOREVER || now < found.getTimestamp() + ttl || !deletes.contains(found)) {
                    candidates.add(found);
                } else {
                    Store.expiredOrDeleted(set, found);
                }
            }
        } while (i.hasNext());
    }
}
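
Both Memcache methods above combine add with NavigableSet's navigation views. A minimal JDK-only sketch of the calls they rely on -- tailSet, headSet, and descendingIterator -- noting that the views are live, so removals through a view or its iterator (as in getRowKeyAtOrBefore) also remove from the backing set:

import java.util.Iterator;
import java.util.NavigableSet;
import java.util.TreeSet;

public class NavigationViewsDemo {
    public static void main(String[] args) {
        NavigableSet<Integer> set = new TreeSet<>();
        for (int v : new int[] { 10, 20, 30, 40 }) {
            set.add(v);
        }
        System.out.println(set.tailSet(25, true)); // [30, 40]: entries at or after 25
        NavigableSet<Integer> head = set.headSet(25, false); // [10, 20]: entries before 25
        // Walk the head in reverse, as getRowKeyBefore does with descendingIterator().
        Iterator<Integer> it = head.descendingIterator();
        while (it.hasNext()) {
            System.out.println(it.next()); // prints 20, then 10
        }
        // The views are live: clearing the head view removes from the backing set.
        head.clear();
        System.out.println(set); // [30, 40]
    }
}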

From source file:org.apache.hadoop.hbase.regionserver.TestStoreScanner.java

NavigableSet<byte[]> getCols(String... strCols) {
    NavigableSet<byte[]> cols = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    for (String col : strCols) {
        byte[] bytes = Bytes.toBytes(col);
        cols.add(bytes);
    }
    return cols;
}

From source file:org.apache.hadoop.hbase.regionserver.wal.HLogUtil.java

/**
 * Returns sorted set of edit files made by wal-log splitter, excluding files
 * with '.temp' suffix.
 *
 * @param fs
 * @param regiondir
 * @return Files in passed <code>regiondir</code> as a sorted set.
 * @throws IOException
 */
public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs, final Path regiondir)
        throws IOException {
    NavigableSet<Path> filesSorted = new TreeSet<Path>();
    Path editsdir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
    if (!fs.exists(editsdir))
        return filesSorted;
    FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {
        @Override
        public boolean accept(Path p) {
            boolean result = false;
            try {
                // Return files and only files that match the editfile names pattern.
                // There can be other files in this directory other than edit files.
                // In particular, on error, we'll move aside the bad edit file giving
                // it a timestamp suffix. See moveAsideBadEditsFile.
                Matcher m = HLog.EDITFILES_NAME_PATTERN.matcher(p.getName());
                result = fs.isFile(p) && m.matches();
                // Skip the file whose name ends with RECOVERED_LOG_TMPFILE_SUFFIX,
                // because it means the splithlog thread is writing this file.
                if (p.getName().endsWith(HLog.RECOVERED_LOG_TMPFILE_SUFFIX)) {
                    result = false;
                }
            } catch (IOException e) {
                LOG.warn("Failed isFile check on " + p);
            }
            return result;
        }
    });
    if (files == null)
        return filesSorted;
    for (FileStatus status : files) {
        filesSorted.add(status.getPath());
    }
    return filesSorted;
}
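
The TreeSet<Path> here takes no comparator, which works because Hadoop's Path implements Comparable, so the files come back sorted by path name with duplicates dropped. A minimal sketch of the same natural-ordering pattern, using java.nio.file.Path as a stand-in for Hadoop's Path:

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.NavigableSet;
import java.util.TreeSet;

public class SortedEditFilesDemo {
    public static void main(String[] args) {
        NavigableSet<Path> files = new TreeSet<>(); // natural ordering of Path
        files.add(Paths.get("edits/0000002"));
        files.add(Paths.get("edits/0000001"));
        System.out.println(files.add(Paths.get("edits/0000001"))); // false: duplicate ignored
        System.out.println(files); // [edits/0000001, edits/0000002], sorted
    }
}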

From source file:org.apache.hadoop.hbase.rsgroup.RSGroupInfoManagerImpl.java

private synchronized void refresh(boolean forceOnline) throws IOException {
    List<RSGroupInfo> groupList = new LinkedList<RSGroupInfo>();

    // overwrite anything read from zk, group table is source of truth
    // if online read from GROUP table
    if (forceOnline || isOnline()) {
        LOG.debug("Refreshing in Online mode.");
        if (rsGroupTable == null) {
            rsGroupTable = conn.getTable(RSGROUP_TABLE_NAME);
        }
        groupList.addAll(rsGroupSerDe.retrieveGroupList(rsGroupTable));
    } else {
        LOG.debug("Refershing in Offline mode.");
        String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, rsGroupZNode);
        groupList.addAll(rsGroupSerDe.retrieveGroupList(watcher, groupBasePath));
    }

    // refresh default group, prune
    NavigableSet<TableName> orphanTables = new TreeSet<TableName>();
    for (String entry : master.getTableDescriptors().getAll().keySet()) {
        orphanTables.add(TableName.valueOf(entry));
    }

    List<TableName> specialTables;
    if (!master.isInitialized()) {
        specialTables = new ArrayList<TableName>();
        specialTables.add(AccessControlLists.ACL_TABLE_NAME);
        specialTables.add(TableName.META_TABLE_NAME);
        specialTables.add(TableName.NAMESPACE_TABLE_NAME);
        specialTables.add(RSGROUP_TABLE_NAME);
    } else {
        specialTables = master.listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
    }

    for (TableName table : specialTables) {
        orphanTables.add(table);
    }
    for (RSGroupInfo group : groupList) {
        if (!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
            orphanTables.removeAll(group.getTables());
        }
    }

    // This is added at the end of the list
    // so it overwrites the default group loaded
    // from the region group table or from zk
    groupList.add(
            new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, Sets.newHashSet(getDefaultServers()), orphanTables));

    // populate the data
    HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap();
    HashMap<TableName, String> newTableMap = Maps.newHashMap();
    for (RSGroupInfo group : groupList) {
        newGroupMap.put(group.getName(), group);
        for (TableName table : group.getTables()) {
            newTableMap.put(table, group.getName());
        }
    }
    rsGroupMap = Collections.unmodifiableMap(newGroupMap);
    tableMap = Collections.unmodifiableMap(newTableMap);

    prevRSGroups.clear();
    prevRSGroups.addAll(rsGroupMap.keySet());
}
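
The orphan-table computation above is plain set algebra on a NavigableSet: seed it with every known table via add, then subtract each group's tables with removeAll; whatever survives belongs to no group. A minimal JDK-only sketch of that add/removeAll pattern, with string names standing in for TableName:

import java.util.Arrays;
import java.util.NavigableSet;
import java.util.TreeSet;

public class OrphanTablesDemo {
    public static void main(String[] args) {
        NavigableSet<String> orphans = new TreeSet<>();
        for (String table : new String[] { "t1", "t2", "t3" }) {
            orphans.add(table); // seed with every known table
        }
        orphans.removeAll(Arrays.asList("t2")); // drop tables claimed by some group
        System.out.println(orphans); // [t1, t3]: tables no group claimed
    }
}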

From source file:org.apache.hadoop.hbase.wal.WALSplitter.java

/**
 * Returns sorted set of edit files made by splitter, excluding files
 * with '.temp' suffix.
 *
 * @param fs
 * @param regiondir
 * @return Files in passed <code>regiondir</code> as a sorted set.
 * @throws IOException
 */
public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs, final Path regiondir)
        throws IOException {
    NavigableSet<Path> filesSorted = new TreeSet<Path>();
    Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
    if (!fs.exists(editsdir))
        return filesSorted;
    FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {
        @Override
        public boolean accept(Path p) {
            boolean result = false;
            try {
                // Return files and only files that match the editfile names pattern.
                // There can be other files in this directory other than edit files.
                // In particular, on error, we'll move aside the bad edit file giving
                // it a timestamp suffix. See moveAsideBadEditsFile.
                Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
                result = fs.isFile(p) && m.matches();
                // Skip the file whose name ends with RECOVERED_LOG_TMPFILE_SUFFIX,
                // because it means the splitwal thread is writing this file.
                if (p.getName().endsWith(RECOVERED_LOG_TMPFILE_SUFFIX)) {
                    result = false;
                }
                // Skip SeqId Files
                if (isSequenceIdFile(p)) {
                    result = false;
                }
            } catch (IOException e) {
                LOG.warn("Failed isFile check on " + p);
            }
            return result;
        }
    });
    if (files == null) {
        return filesSorted;
    }
    for (FileStatus status : files) {
        filesSorted.add(status.getPath());
    }
    return filesSorted;
}

From source file:org.apache.hadoop.hbase.wal.WALSplitUtil.java

/**
 * Returns sorted set of edit files made by splitter, excluding files with '.temp' suffix.
 * @param walFS WAL FileSystem used to retrieving split edits files.
 * @param regionDir WAL region dir to look for recovered edits files under.
 * @return Files in passed <code>regionDir</code> as a sorted set.
 * @throws IOException
 */
public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem walFS, final Path regionDir)
        throws IOException {
    NavigableSet<Path> filesSorted = new TreeSet<>();
    Path editsdir = getRegionDirRecoveredEditsDir(regionDir);
    if (!walFS.exists(editsdir)) {
        return filesSorted;
    }
    FileStatus[] files = FSUtils.listStatus(walFS, editsdir, new PathFilter() {
        @Override
        public boolean accept(Path p) {
            boolean result = false;
            try {
                // Return files and only files that match the editfile names pattern.
                // There can be other files in this directory other than edit files.
                // In particular, on error, we'll move aside the bad edit file giving
                // it a timestamp suffix. See moveAsideBadEditsFile.
                Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
                result = walFS.isFile(p) && m.matches();
                // Skip the file whose name ends with RECOVERED_LOG_TMPFILE_SUFFIX,
                // because it means the splitwal thread is writing this file.
                if (p.getName().endsWith(RECOVERED_LOG_TMPFILE_SUFFIX)) {
                    result = false;
                }
                // Skip SeqId Files
                if (isSequenceIdFile(p)) {
                    result = false;
                }
            } catch (IOException e) {
                LOG.warn("Failed isFile check on {}", p, e);
            }
            return result;
        }
    });
    if (ArrayUtils.isNotEmpty(files)) {
        Arrays.asList(files).forEach(status -> filesSorted.add(status.getPath()));
    }
    return filesSorted;
}

From source file:org.apache.hadoop.hbase.ZKNamespaceManager.java

public NavigableSet<NamespaceDescriptor> list() throws IOException {
    NavigableSet<NamespaceDescriptor> ret = Sets
            .newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
    for (NamespaceDescriptor ns : cache.values()) {
        ret.add(ns);
    }
    return ret;
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicy.java

LeafQueue mockLeafQueue(ParentQueue p, float tot, int i, int[] abs, int[] used, int[] pending, int[] reserved,
        int[] apps, int[] gran) {
    LeafQueue lq = mock(LeafQueue.class);
    List<ApplicationAttemptId> appAttemptIdList = new ArrayList<ApplicationAttemptId>();
    when(lq.getTotalResourcePending()).thenReturn(Resource.newInstance(pending[i], 0));
    // consider moving this to where CapacityScheduler::comparator is accessible
    NavigableSet<FiCaSchedulerApp> qApps = new TreeSet<FiCaSchedulerApp>(new Comparator<FiCaSchedulerApp>() {
        @Override
        public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) {
            return a1.getApplicationAttemptId().compareTo(a2.getApplicationAttemptId());
        }
    });
    // applications are added in global L->R order in queues
    if (apps[i] != 0) {
        int aUsed = used[i] / apps[i];
        int aPending = pending[i] / apps[i];
        int aReserve = reserved[i] / apps[i];
        for (int a = 0; a < apps[i]; ++a) {
            FiCaSchedulerApp mockFiCaApp = mockApp(i, appAlloc, aUsed, aPending, aReserve, gran[i]);
            qApps.add(mockFiCaApp);
            ++appAlloc;
            appAttemptIdList.add(mockFiCaApp.getApplicationAttemptId());
        }
        when(mCS.getAppsInQueue("queue" + (char) ('A' + i - 1))).thenReturn(appAttemptIdList);
    }
    when(lq.getApplications()).thenReturn(qApps);
    if (setAMResourcePercent != 0.0f) {
        when(lq.getMaxAMResourcePerQueuePercent()).thenReturn(setAMResourcePercent);
    }
    p.getChildQueues().add(lq);
    return lq;
}
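
One subtlety worth noting in the mock above: a TreeSet deduplicates by its Comparator, not by equals, so two apps whose ApplicationAttemptIds compare equal would collapse into a single entry in qApps. A minimal sketch of that behavior:

import java.util.NavigableSet;
import java.util.TreeSet;

public class ComparatorDedupDemo {
    public static void main(String[] args) {
        // The comparator, not equals(), decides what counts as a duplicate.
        NavigableSet<String> apps = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        System.out.println(apps.add("app_0001")); // true
        System.out.println(apps.add("APP_0001")); // false: equal under the comparator
        System.out.println(apps);                 // [app_0001]
    }
}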