Example usage for java.util NavigableSet isEmpty

Introduction

On this page you can find example usages of java.util.NavigableSet.isEmpty().

Prototype

boolean isEmpty();

Document

Returns true if this set contains no elements.
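
A minimal, self-contained sketch of the method (class name is illustrative):

import java.util.NavigableSet;
import java.util.TreeSet;

public class IsEmptyDemo {
    public static void main(String[] args) {
        NavigableSet<String> set = new TreeSet<String>();
        System.out.println(set.isEmpty()); // true: no elements yet
        set.add("a");
        System.out.println(set.isEmpty()); // false: one element
        set.pollFirst();                   // retrieves and removes "a"
        System.out.println(set.isEmpty()); // true again
    }
}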

Usage

From source file:py.una.pol.karaku.menu.server.MenuServerLogic.java

private void handleRootMenu(Menu menu) {

    // This TreeSet is used as a queue, except that duplicate
    // values are not desirable.
    NavigableSet<Menu> toSort = new TreeSet<Menu>();
    Map<Menu, Menu> parents = new HashMap<Menu, Menu>();
    toSort.add(menu);
    Menu next;
    while (!toSort.isEmpty()) {

        next = toSort.pollFirst();
        handleMenu(next, parents.get(next));
        if (ListHelper.hasElements(next.getItems())) {
            sortInMemory(next.getItems());
            toSort.addAll(next.getItems());
            for (Menu m : next.getItems()) {
                parents.put(m, next);
            }
        }
    }
}
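
Two details make this pattern work: the TreeSet keeps the pending menus sorted and duplicate-free, and the !toSort.isEmpty() / pollFirst() pair drains it like a priority queue, since pollFirst() retrieves and removes the smallest element. The loop therefore terminates once every reachable menu has been visited exactly once.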

From source file:co.cask.tephra.hbase10.coprocessor.TransactionProcessor.java

/**
 * Ensures that family delete markers are present in the columns requested for any scan operation.
 * @param scan The original scan request
 * @return The modified scan request with the family delete qualifiers represented
 */
private Scan projectFamilyDeletes(Scan scan) {
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : scan.getFamilyMap().entrySet()) {
        NavigableSet<byte[]> columns = entry.getValue();
        // wildcard scans will automatically include the delete marker, so only need to add it when we have
        // explicit columns listed
        if (columns != null && !columns.isEmpty()) {
            scan.addColumn(entry.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER);
        }
    }
    return scan;
}

From source file:co.cask.tephra.hbase10.coprocessor.TransactionProcessor.java

/**
 * Ensures that family delete markers are present in the columns requested for any get operation.
 * @param get The original get request
 * @return The modified get request with the family delete qualifiers represented
 */
private Get projectFamilyDeletes(Get get) {
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : get.getFamilyMap().entrySet()) {
        NavigableSet<byte[]> columns = entry.getValue();
        // wildcard scans will automatically include the delete marker, so only need to add it when we have
        // explicit columns listed
        if (columns != null && !columns.isEmpty()) {
            get.addColumn(entry.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER);
        }
    }
    return get;
}
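
In HBase's family map, a null column set denotes a wildcard over the whole family, while a non-null set lists explicit columns; that is why both methods guard with columns != null && !columns.isEmpty() before adding the delete-marker qualifier. A condensed sketch of the distinction (variable names are illustrative):

NavigableSet<byte[]> columns = scan.getFamilyMap().get(family);
boolean wildcard = (columns == null);                       // all columns; delete marker included automatically
boolean explicit = (columns != null && !columns.isEmpty()); // marker must be added explicitly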

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives the maximum for a given combination of column qualifier and column
 * family, in the given row range as defined in the Scan object. In its
 * current implementation, it takes one column family and one column qualifier
 * (if provided). If the column qualifier is null, the maximum value for the
 * entire column family is returned.
 */
@Override
public void getMax(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    InternalScanner scanner = null;
    AggregateResponse response = null;
    T max = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        T temp;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        List<Cell> results = new ArrayList<Cell>();
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        // qualifier can be null.
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                temp = ci.getValue(colFamily, qualifier, kv);
                max = (max == null || (temp != null && ci.compare(temp, max) > 0)) ? temp : max;
            }
            results.clear();
        } while (hasMoreRows);
        if (max != null) {
            AggregateResponse.Builder builder = AggregateResponse.newBuilder();
            builder.addFirstPart(ci.getProtoForCellType(max).toByteString());
            response = builder.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    log.info("Maximum from this region is " + env.getRegion().getRegionNameAsString() + ": " + max);
    done.run(response);
}
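
Note that pollFirst() both returns and removes the first qualifier, and it returns null rather than throwing when the set is empty, so the isEmpty() check above is defensive; the essential guard is qualifiers != null. The same null-then-isEmpty pattern recurs in the remaining aggregation endpoints below.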

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives the minimum for a given combination of column qualifier and column
 * family, in the given row range as defined in the Scan object. In its
 * current implementation, it takes one column family and one column qualifier
 * (if provided). If the column qualifier is null, the minimum value for the
 * entire column family is returned.
 */
@Override
public void getMin(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    InternalScanner scanner = null;
    T min = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        T temp;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        List<Cell> results = new ArrayList<Cell>();
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                temp = ci.getValue(colFamily, qualifier, kv);
                min = (min == null || (temp != null && ci.compare(temp, min) < 0)) ? temp : min;
            }
            results.clear();
        } while (hasMoreRows);
        if (min != null) {
            response = AggregateResponse.newBuilder().addFirstPart(ci.getProtoForCellType(min).toByteString())
                    .build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    log.info("Minimum from this region is " + env.getRegion().getRegionNameAsString() + ": " + min);
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives the sum for a given combination of column qualifier and column
 * family, in the given row range as defined in the Scan object. In its
 * current implementation, it takes one column family and one column qualifier
 * (if provided). If the column qualifier is null, the sum for the entire
 * column family is returned.
 */
@Override
public void getSum(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    InternalScanner scanner = null;
    long sum = 0L;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null;
        T temp;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                temp = ci.getValue(colFamily, qualifier, kv);
                if (temp != null)
                    sumVal = ci.add(sumVal, ci.castToReturnType(temp));
            }
            results.clear();
        } while (hasMoreRows);
        if (sumVal != null) {
            response = AggregateResponse.newBuilder()
                    .addFirstPart(ci.getProtoForPromotedType(sumVal).toByteString()).build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    log.debug("Sum from this region is " + env.getRegion().getRegionNameAsString() + ": " + sum);
    done.run(response);
}
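
Note a quirk in this snippet: the local variable sum is declared but never updated, so the debug line always logs 0; the actual sum travels back in the response as sumVal.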

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives the row count for the given column family and column qualifier, in
 * the given row range as defined in the Scan object.
 * @throws IOException
 */
@Override
public void getRowNum(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    long counter = 0L;
    List<Cell> results = new ArrayList<Cell>();
    InternalScanner scanner = null;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        byte[][] colFamilies = scan.getFamilies();
        byte[] colFamily = colFamilies != null ? colFamilies[0] : null;
        NavigableSet<byte[]> qualifiers = colFamilies != null ? scan.getFamilyMap().get(colFamily) : null;
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        if (scan.getFilter() == null && qualifier == null)
            scan.setFilter(new FirstKeyOnlyFilter());
        scanner = env.getRegion().getScanner(scan);
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            if (results.size() > 0) {
                counter++;
            }
            results.clear();
        } while (hasMoreRows);
        ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter);
        bb.rewind();
        response = AggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    log.info("Row counter from this region is " + env.getRegion().getRegionNameAsString() + ": " + counter);
    done.run(response);
}
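
The FirstKeyOnlyFilter set above is a row-counting optimization: when no explicit qualifier or filter is requested, it makes the scanner return only the first cell of each row, so rows can be counted without reading every column.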

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives a Pair with first object as Sum and second object as row count,
 * computed for a given combination of column qualifier and column family in
 * the given row range as defined in the Scan object. In its current
 * implementation, it takes one column family and one column qualifier (if
 * provided). If the column qualifier is null, an aggregate sum over the
 * entire column family is returned.
 * <p>
 * The average is computed in
 * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by
 * processing results from all regions, so it is fine to pass the sum and a
 * Long row count.
 */
@Override
public void getAvg(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    InternalScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null;
        Long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;

        do {
            results.clear();
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first = ci.getProtoForPromotedType(sumVal).toByteString();
            AggregateResponse.Builder pair = AggregateResponse.newBuilder();
            pair.addFirstPart(first);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
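
The response carries only this region's partial sum and row count; as the javadoc notes, the average itself is computed client-side after merging all regions. A minimal sketch of that final step, assuming the partials have already been decoded into arrays (illustrative only, not the actual AggregationClient code):

// Illustrative only: merge per-region (sum, rowCount) partials into a global average.
static double mergeAvg(double[] regionSums, long[] regionCounts) {
    double totalSum = 0;
    long totalRows = 0;
    for (int i = 0; i < regionSums.length; i++) {
        totalSum += regionSums[i];
        totalRows += regionCounts[i];
    }
    return totalSum / totalRows;
}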

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives a Pair whose first element is a List containing the sum and the sum
 * of squares, and whose second element is the row count. It is computed for a
 * given combination of column qualifier and column family in the given row
 * range as defined in the Scan object. In its current implementation, it takes
 * one column family and one column qualifier (if provided). The idea is to
 * compute the variance first (the average of the squares less the square of
 * the average); the standard deviation is the square root of the variance.
 */
@Override
public void getStd(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    InternalScanner scanner = null;
    AggregateResponse response = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumSqVal = null, tempVal = null;
        long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();

        boolean hasMoreRows = false;

        do {
            tempVal = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumSqVal = ci.add(sumSqVal, ci.multiply(tempVal, tempVal));
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
            ByteString first_sumSqVal = ci.getProtoForPromotedType(sumSqVal).toByteString();
            AggregateResponse.Builder pair = AggregateResponse.newBuilder();
            pair.addFirstPart(first_sumVal);
            pair.addFirstPart(first_sumSqVal);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
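
As the javadoc explains, the per-region parts (sum, sum of squares, row count) are all the client needs to finish the job: the variance is the average of the squares less the square of the average, and the standard deviation is its square root. A minimal sketch of that final step (illustrative only, not the actual AggregationClient code):

// Illustrative only: derive the standard deviation from merged totals.
static double std(double sum, double sumOfSquares, long rowCount) {
    double avg = sum / rowCount;
    double avgOfSquares = sumOfSquares / rowCount;
    return Math.sqrt(avgOfSquares - avg * avg);
}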

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives a List containing sum of values and sum of weights.
 * It is computed for the combination of column
 * family and column qualifier(s) in the given row range as defined in the
 * Scan object. In its current implementation, it takes one column family and
 * two column qualifiers. The first qualifier is for the values column and
 * the second qualifier (optional) is for the weight column.
 */
@Override
public void getMedian(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    InternalScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumWeights = null, tempVal = null, tempWeight = null;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] valQualifier = null, weightQualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            valQualifier = qualifiers.pollFirst();
            // if weighted median is requested, get qualifier for the weight column
            weightQualifier = qualifiers.pollLast();
        }
        List<Cell> results = new ArrayList<Cell>();

        boolean hasMoreRows = false;

        do {
            tempVal = null;
            tempWeight = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv)));
                if (weightQualifier != null) {
                    tempWeight = ci.add(tempWeight,
                            ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv)));
                }
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumWeights = ci.add(sumWeights, tempWeight);
        } while (hasMoreRows);
        ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
        S s = sumWeights == null ? ci.castToReturnType(ci.getMinValue()) : sumWeights;
        ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString();
        AggregateResponse.Builder pair = AggregateResponse.newBuilder();
        pair.addFirstPart(first_sumVal);
        pair.addFirstPart(first_sumWeights);
        response = pair.build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
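
Unlike the other endpoints, this one does not compute the median itself: it returns per-region sums of values and (optionally) weights, which the client-side median logic then uses to locate the row containing the weighted median. Note also that when only one qualifier is given, pollLast() runs on a set already emptied by pollFirst() and returns null, so the computation falls back to an unweighted median.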