Example usage for java.util NavigableSet isEmpty

List of usage examples for java.util NavigableSet isEmpty

Introduction

On this page you can find usage examples for java.util NavigableSet isEmpty.

Prototype

boolean isEmpty();

Document

Returns true if this set contains no elements.
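
For quick reference, here is a minimal, self-contained sketch (not taken from the projects below) showing isEmpty() on a NavigableSet backed by a TreeSet:

import java.util.NavigableSet;
import java.util.TreeSet;

public class NavigableSetIsEmptyExample {
    public static void main(String[] args) {
        NavigableSet<String> qualifiers = new TreeSet<String>();

        // A freshly created set contains no elements, so isEmpty() returns true.
        System.out.println(qualifiers.isEmpty()); // true

        qualifiers.add("cf:qual1");

        // After adding an element, isEmpty() returns false.
        System.out.println(qualifiers.isEmpty()); // false
    }
}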

Usage

From source file:org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java

/**
 * Gives the row count for the given column family and column qualifier, in
 * the given row range as defined in the Scan object.
 * @throws IOException
 */
@Override
public void getRowNum(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    TransactionalAggregateResponse response = null;
    long counter = 0L;
    List<Cell> results = new ArrayList<Cell>();
    RegionScanner scanner = null;
    long transactionId = 0L;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        byte[][] colFamilies = scan.getFamilies();
        byte[] colFamily = colFamilies != null ? colFamilies[0] : null;
        NavigableSet<byte[]> qualifiers = colFamilies != null ? scan.getFamilyMap().get(colFamily) : null;
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        if (scan.getFilter() == null && qualifier == null)
            scan.setFilter(new FirstKeyOnlyFilter());
        transactionId = request.getTransactionId();
        scanner = getScanner(transactionId, scan);
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            if (results.size() > 0) {
                counter++;
            }
            results.clear();
        } while (hasMoreRows);
        ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter);
        bb.rewind();
        response = TransactionalAggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }

    if (LOG.isInfoEnabled())
        LOG.info(String.format(
                "Row counter for txId %d from this region: %s is %d, startKey is [%s], endKey is [%s]",
                transactionId, env.getRegion().getRegionNameAsString(), counter,
                env.getRegion().getStartKey() == null ? "null"
                        : Bytes.toStringBinary(env.getRegion().getStartKey()),
                env.getRegion().getEndKey() == null ? "null"
                        : Bytes.toStringBinary(env.getRegion().getEndKey())));

    done.run(response);
}
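
The null-and-isEmpty guard before pollFirst() seen above is a recurring pattern in these coprocessor methods. Here is a minimal sketch of the same idea, using a plain TreeSet in place of the Scan's family map (the names are illustrative, not part of the HBase API):

import java.util.NavigableSet;
import java.util.TreeSet;

public class PollFirstGuardSketch {
    public static void main(String[] args) {
        NavigableSet<String> qualifiers = new TreeSet<String>();
        qualifiers.add("q1");
        qualifiers.add("q2");

        String qualifier = null;
        // Checking isEmpty() first avoids treating an empty set as if it had a qualifier;
        // pollFirst() would return null on an empty set, but the explicit guard keeps the
        // intent obvious and mirrors the coprocessor code above.
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst(); // retrieves and removes "q1"
        }
        System.out.println(qualifier);  // prints q1
        System.out.println(qualifiers); // prints [q2]
    }
}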

From source file:org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java

/**
 * Gives a Pair with first object as Sum and second object as row count,
 * computed for a given combination of column qualifier and column family in
 * the given row range as defined in the Scan object. In its current
 * implementation, it takes one column family and one column qualifier (if
 * provided). In case of a null column qualifier, an aggregate sum over the
 * entire column family will be returned.
 * <p>
 * The average is computed in
 * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by
 * processing results from all regions, so it is okay to pass a sum and a Long
 * type.
 */
@Override
public void getAvg(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    TransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null;
        Long rowCountVal = 0l;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        scanner = getScanner(transactionId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;

        do {
            results.clear();
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first = ci.getProtoForPromotedType(sumVal).toByteString();
            TransactionalAggregateResponse.Builder pair = TransactionalAggregateResponse.newBuilder();
            pair.addFirstPart(first);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java

/**
 * Gives a Pair with first object a List containing Sum and sum of squares,
 * and the second object as row count. It is computed for a given combination of
 * column qualifier and column family in the given row range as defined in the
 * Scan object. In its current implementation, it takes one column family and
 * one column qualifier (if provided). The idea is to get the variance first:
 * the average of the squares less the square of the average; the standard
 * deviation is the square root of the variance.
 */
@Override
public void getStd(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    RegionScanner scanner = null;
    TransactionalAggregateResponse response = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumSqVal = null, tempVal = null;
        long rowCountVal = 0l;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        scanner = getScanner(transactionId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();

        boolean hasMoreRows = false;

        do {
            tempVal = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumSqVal = ci.add(sumSqVal, ci.multiply(tempVal, tempVal));
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
            ByteString first_sumSqVal = ci.getProtoForPromotedType(sumSqVal).toByteString();
            TransactionalAggregateResponse.Builder pair = TransactionalAggregateResponse.newBuilder();
            pair.addFirstPart(first_sumVal);
            pair.addFirstPart(first_sumSqVal);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java

/**
 * Gives a List containing sum of values and sum of weights.
 * It is computed for the combination of column
 * family and column qualifier(s) in the given row range as defined in the
 * Scan object. In its current implementation, it takes one column family and
 * two column qualifiers. The first qualifier is for the values column and
 * the second qualifier (optional) is for the weight column.
 */
@Override
public void getMedian(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    TransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumWeights = null, tempVal = null, tempWeight = null;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        scanner = getScanner(transactionId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] valQualifier = null, weightQualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            valQualifier = qualifiers.pollFirst();
            // if weighted median is requested, get qualifier for the weight column
            weightQualifier = qualifiers.pollLast();
        }
        List<Cell> results = new ArrayList<Cell>();

        boolean hasMoreRows = false;

        do {
            tempVal = null;
            tempWeight = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv)));
                if (weightQualifier != null) {
                    tempWeight = ci.add(tempWeight,
                            ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv)));
                }
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumWeights = ci.add(sumWeights, tempWeight);
        } while (hasMoreRows);
        ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
        S s = sumWeights == null ? ci.castToReturnType(ci.getMinValue()) : sumWeights;
        ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString();
        TransactionalAggregateResponse.Builder pair = TransactionalAggregateResponse.newBuilder();
        pair.addFirstPart(first_sumVal);
        pair.addFirstPart(first_sumWeights);
        response = pair.build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}

From source file:org.apache.hadoop.hbase.regionserver.DefaultMemStore.java

private void getRowKeyAtOrBefore(final NavigableSet<KeyValue> set, final GetClosestRowBeforeTracker state) {
    if (set.isEmpty()) {
        return;
    }
    if (!walkForwardInSingleRow(set, state.getTargetKey(), state)) {
        // Found nothing in row.  Try backing up.
        getRowKeyBefore(set, state);
    }
}

From source file:org.apache.hadoop.hbase.regionserver.DefaultMemStore.java

private Member memberOfPreviousRow(NavigableSet<KeyValue> set, final GetClosestRowBeforeTracker state,
        final KeyValue firstOnRow) {
    NavigableSet<KeyValue> head = set.headSet(firstOnRow, false);
    if (head.isEmpty())
        return null;
    for (Iterator<KeyValue> i = head.descendingIterator(); i.hasNext();) {
        KeyValue found = i.next();
        if (state.isExpired(found)) {
            i.remove();
            continue;
        }
        return new Member(head, found);
    }
    return null;
}

From source file:org.apache.hadoop.hbase.regionserver.HRegion.java

/**
 * Read the edits log put under this region by wal log splitting process.  Put
 * the recovered edits back up into this region.
 *
 * <p>We can ignore any log message that has a sequence ID that's equal to or
 * lower than minSeqId.  (Because we know such log messages are already
 * reflected in the HFiles.)
 *
 * <p>While this is running we are putting pressure on memory yet we are
 * outside of our usual accounting because we are not yet an onlined region
 * (this stuff is being run as part of Region initialization).  This means
 * that if we're up against global memory limits, we'll not be flagged to flush
 * because we are not online. We can't be flushed by usual mechanisms anyways;
 * we're not yet online so our relative sequenceids are not yet aligned with
 * HLog sequenceids -- not till we come up online, post processing of split
 * edits.
 *
 * <p>But to help relieve memory pressure, at least manage our own heap size
 * flushing if we are in excess of per-region limits.  Flushing, though, we have
 * to be careful and avoid using the regionserver/hlog sequenceid.  It is running
 * on a different line from what is going on in this region context, so if we
 * crashed replaying these edits, but in the midst had a flush that used the
 * regionserver log with a sequenceid in excess of what is going on
 * in this region and its split editlogs, then we could miss edits the
 * next time we go to recover. So, we have to flush inline, using seqids that
 * make sense in this single-region context only -- until we come online.
 *
 * @param maxSeqIdInStores Any edit found in split editlogs needs to be in excess of
 * the maxSeqId for the store to be applied, else it is skipped.
 * @return the sequence id of the last edit added to this region out of the
 * recovered edits log or <code>minSeqId</code> if nothing added from editlogs.
 * @throws UnsupportedEncodingException
 * @throws IOException
 */
protected long replayRecoveredEditsIfAny(final Path regiondir, Map<byte[], Long> maxSeqIdInStores,
        final CancelableProgressable reporter, final MonitoredTask status)
        throws UnsupportedEncodingException, IOException {
    long minSeqIdForTheRegion = -1;
    for (Long maxSeqIdInStore : maxSeqIdInStores.values()) {
        if (maxSeqIdInStore < minSeqIdForTheRegion || minSeqIdForTheRegion == -1) {
            minSeqIdForTheRegion = maxSeqIdInStore;
        }
    }
    long seqid = minSeqIdForTheRegion;

    FileSystem fs = this.fs.getFileSystem();
    NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regiondir);
    if (LOG.isDebugEnabled()) {
        LOG.debug(
                "Found " + (files == null ? 0 : files.size()) + " recovered edits file(s) under " + regiondir);
    }

    if (files == null || files.isEmpty())
        return seqid;

    for (Path edits : files) {
        if (edits == null || !fs.exists(edits)) {
            LOG.warn("Null or non-existent edits file: " + edits);
            continue;
        }
        if (isZeroLengthThenDelete(fs, edits))
            continue;

        long maxSeqId;
        String fileName = edits.getName();
        maxSeqId = Math.abs(Long.parseLong(fileName));
        if (maxSeqId <= minSeqIdForTheRegion) {
            if (LOG.isDebugEnabled()) {
                String msg = "Maximum sequenceid for this log is " + maxSeqId
                        + " and minimum sequenceid for the region is " + minSeqIdForTheRegion
                        + ", skipped the whole file, path=" + edits;
                LOG.debug(msg);
            }
            continue;
        }

        try {
            // replay the edits. Replay can return -1 if everything is skipped, only update if seqId is greater
            seqid = Math.max(seqid, replayRecoveredEdits(edits, maxSeqIdInStores, reporter));
        } catch (IOException e) {
            boolean skipErrors = conf.getBoolean(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS,
                    conf.getBoolean("hbase.skip.errors", HConstants.DEFAULT_HREGION_EDITS_REPLAY_SKIP_ERRORS));
            if (conf.get("hbase.skip.errors") != null) {
                LOG.warn("The property 'hbase.skip.errors' has been deprecated. Please use "
                        + HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS + " instead.");
            }
            if (skipErrors) {
                Path p = HLogUtil.moveAsideBadEditsFile(fs, edits);
                LOG.error(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS + "=true so continuing. Renamed " + edits
                        + " as " + p, e);
            } else {
                throw e;
            }
        }
    }
    // The edits size added into rsAccounting during this replaying will not
    // be required any more. So just clear it.
    if (this.rsAccounting != null) {
        this.rsAccounting.clearRegionReplayEditsSize(this.getRegionName());
    }
    if (seqid > minSeqIdForTheRegion) {
        // Then we added some edits to memory. Flush and cleanup split edit files.
        internalFlushcache(null, seqid, status);
    }
    // Now delete the content of recovered edits.  We're done w/ them.
    for (Path file : files) {
        if (!fs.delete(file, false)) {
            LOG.error("Failed delete of " + file);
        } else {
            LOG.debug("Deleted recovered.edits file=" + file);
        }
    }
    return seqid;
}

From source file:org.apache.hadoop.hbase.regionserver.Memcache.java

private void getRowKeyAtOrBefore(final ConcurrentSkipListSet<KeyValue> set, final KeyValue kv,
        final NavigableSet<KeyValue> candidates, final NavigableSet<KeyValue> deletes, final long now) {
    if (set.isEmpty()) {
        return;
    }
    // We want the earliest possible key to start searching from.  Start before
    // the candidate key in case it turns out a delete came in later.
    KeyValue search = candidates.isEmpty() ? kv : candidates.first();

    // Get all the entries that come equal or after our search key
    SortedSet<KeyValue> tailset = set.tailSet(search);

    // if there are items in the tail map, there's either a direct match to
    // the search key, or a range of values between the first candidate key
    // and the ultimate search key (or the end of the cache)
    if (!tailset.isEmpty() && this.comparator.compareRows(tailset.first(), search) <= 0) {
        // Keep looking at cells as long as they are no greater than the 
        // ultimate search key and there's still records left in the map.
        KeyValue deleted = null;
        KeyValue found = null;
        for (Iterator<KeyValue> iterator = tailset.iterator(); iterator.hasNext()
                && (found == null || this.comparator.compareRows(found, kv) <= 0);) {
            found = iterator.next();
            if (this.comparator.compareRows(found, kv) <= 0) {
                if (found.isDeleteType()) {
                    Store.handleDeletes(found, candidates, deletes);
                    if (deleted == null) {
                        deleted = found;
                    }
                } else {
                    if (Store.notExpiredAndNotInDeletes(this.ttl, found, now, deletes)) {
                        candidates.add(found);
                    } else {
                        if (deleted == null) {
                            deleted = found;
                        }
                        // TODO: Check this removes the right key.
                        // It's expired.  Remove it.
                        iterator.remove();
                    }
                }
            }
        }
        if (candidates.isEmpty() && deleted != null) {
            getRowKeyBefore(set, deleted, candidates, deletes, now);
        }
    } else {
        // The tail didn't contain any keys that matched our criteria, or was 
        // empty. Examine all the keys that precede our splitting point.
        getRowKeyBefore(set, search, candidates, deletes, now);
    }
}
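
The tailSet-plus-isEmpty check above generalizes to any NavigableSet; here is a minimal sketch with illustrative names rather than HBase types:

import java.util.NavigableSet;
import java.util.SortedSet;
import java.util.TreeSet;

public class TailSetIsEmptySketch {
    public static void main(String[] args) {
        NavigableSet<String> set = new TreeSet<String>();
        set.add("apple");
        set.add("banana");
        set.add("cherry");

        // All entries greater than or equal to the search key.
        SortedSet<String> tail = set.tailSet("b");

        // Only inspect tail.first() when the tail set is non-empty;
        // first() on an empty set would throw NoSuchElementException.
        if (!tail.isEmpty()) {
            System.out.println(tail.first()); // prints banana
        } else {
            System.out.println("no keys at or after the search key");
        }
    }
}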

From source file:org.apache.hadoop.hbase.regionserver.Memcache.java

private void getRowKeyBefore(ConcurrentSkipListSet<KeyValue> set, KeyValue search,
        NavigableSet<KeyValue> candidates, final NavigableSet<KeyValue> deletes, final long now) {
    NavigableSet<KeyValue> headSet = set.headSet(search);
    // If we tried to create a headMap and got an empty map, then there are
    // no keys at or before the search key, so we're done.
    if (headSet.isEmpty()) {
        return;
    }

    // If there aren't any candidate keys at this point, we need to search
    // backwards until we find at least one candidate or run out of headMap.
    if (candidates.isEmpty()) {
        KeyValue lastFound = null;
        for (Iterator<KeyValue> i = headSet.descendingIterator(); i.hasNext();) {
            KeyValue found = i.next();
            // if the last row we found a candidate key for is different than
            // the row of the current candidate, we can stop looking -- if it's
            // not a delete record.
            boolean deleted = found.isDeleteType();
            if (lastFound != null && this.comparator.matchingRows(lastFound, found) && !deleted) {
                break;
            }
            // If this isn't a delete, record it as a candidate key. Also 
            // take note of this candidate so that we'll know when
            // we cross the row boundary into the previous row.
            if (!deleted) {
                if (Store.notExpiredAndNotInDeletes(this.ttl, found, now, deletes)) {
                    lastFound = found;
                    candidates.add(found);
                } else {
                    // It's expired.
                    Store.expiredOrDeleted(set, found);
                }
            } else {
                // We are encountering items in reverse.  We may have just added
                // an item to candidates that this later item deletes.  Check.  If we
                // found something in candidates, remove it from the set.
                if (Store.handleDeletes(found, candidates, deletes)) {
                    remove(set, found);
                }
            }
        }
    } else {
        // If there are already some candidate keys, we only need to consider
        // the very last row's worth of keys in the headMap, because any 
        // smaller acceptable candidate keys would have caused us to start
        // our search earlier in the list, and we wouldn't be searching here.
        SortedSet<KeyValue> rowTailMap = headSet.tailSet(headSet.last().cloneRow(HConstants.LATEST_TIMESTAMP));
        Iterator<KeyValue> i = rowTailMap.iterator();
        do {
            KeyValue found = i.next();
            if (found.isDeleteType()) {
                Store.handleDeletes(found, candidates, deletes);
            } else {
                if (ttl == HConstants.FOREVER || now < found.getTimestamp() + ttl || !deletes.contains(found)) {
                    candidates.add(found);
                } else {
                    Store.expiredOrDeleted(set, found);
                }
            }
        } while (i.hasNext());
    }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java

/**
 * @throws IOException
 * @see https://issues.apache.org/jira/browse/HBASE-4862
 */
@Test(timeout = 300000)
public void testConcurrentSplitLogAndReplayRecoverEdit() throws IOException {
    LOG.info("testConcurrentSplitLogAndReplayRecoverEdit");
    // Generate hlogs for our destination region
    String regionName = "r0";
    final Path regiondir = new Path(TABLEDIR, regionName);
    REGIONS = new ArrayList<String>();
    REGIONS.add(regionName);
    generateHLogs(-1);

    HLogFactory.createHLog(fs, regiondir, regionName, conf);
    FileStatus[] logfiles = fs.listStatus(HLOGDIR);
    assertTrue("There should be some log file", logfiles != null && logfiles.length > 0);

    HLogSplitter logSplitter = new HLogSplitter(conf, HBASEDIR, fs, null, null, null) {
        protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf) throws IOException {
            HLog.Writer writer = HLogFactory.createRecoveredEditsWriter(fs, logfile, conf);
            // After creating writer, simulate region's
            // replayRecoveredEditsIfAny() which gets SplitEditFiles of this
            // region and delete them, excluding files with '.temp' suffix.
            NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regiondir);
            if (files != null && !files.isEmpty()) {
                for (Path file : files) {
                    if (!this.fs.delete(file, false)) {
                        LOG.error("Failed delete of " + file);
                    } else {
                        LOG.debug("Deleted recovered.edits file=" + file);
                    }
                }
            }
            return writer;
        }
    };
    try {
        logSplitter.splitLogFile(logfiles[0], null);
    } catch (IOException e) {
        LOG.info(e);
        Assert.fail(
                "Throws IOException when spliting " + "log, it is most likely because writing file does not "
                        + "exist which is caused by concurrent replayRecoveredEditsIfAny()");
    }
    if (fs.exists(CORRUPTDIR)) {
        if (fs.listStatus(CORRUPTDIR).length > 0) {
            Assert.fail("There are some corrupt logs, "
                    + "it is most likely caused by concurrent replayRecoveredEditsIfAny()");
        }
    }
}