Example usage for java.util NavigableSet pollFirst

List of usage examples for java.util NavigableSet pollFirst

Introduction

On this page you can find example usage for java.util NavigableSet pollFirst.

Prototype

E pollFirst();

Document

Retrieves and removes the first (lowest) element, or returns null if this set is empty.
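
For orientation before the real-world examples below, here is a minimal, self-contained sketch of this behavior (plain JDK, no HBase dependencies; the class name is made up for the demo):

import java.util.NavigableSet;
import java.util.TreeSet;

public class PollFirstDemo {
    public static void main(String[] args) {
        NavigableSet<String> set = new TreeSet<>();
        set.add("b");
        set.add("a");
        set.add("c");
        // retrieves and removes the lowest element
        System.out.println(set.pollFirst()); // a
        System.out.println(set);             // [b, c]
        set.clear();
        // on an empty set, pollFirst returns null rather than throwing
        System.out.println(set.pollFirst()); // null
    }
}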

Usage

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives the row count for the given column family and column qualifier, in
 * the given row range as defined in the Scan object.
 * @throws IOException
 */
@Override
public void getRowNum(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    long counter = 0L;
    List<Cell> results = new ArrayList<Cell>();
    InternalScanner scanner = null;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        byte[][] colFamilies = scan.getFamilies();
        byte[] colFamily = colFamilies != null ? colFamilies[0] : null;
        NavigableSet<byte[]> qualifiers = colFamilies != null ? scan.getFamilyMap().get(colFamily) : null;
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        if (scan.getFilter() == null && qualifier == null)
            scan.setFilter(new FirstKeyOnlyFilter());
        scanner = env.getRegion().getScanner(scan);
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            if (results.size() > 0) {
                counter++;
            }
            results.clear();
        } while (hasMoreRows);
        ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter);
        bb.rewind();
        response = AggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    log.info("Row counter from this region is " + env.getRegion().getRegionNameAsString() + ": " + counter);
    done.run(response);
}
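
A caveat worth noting in this pattern: pollFirst removes the element it returns, and Scan.getFamilyMap() hands back the live qualifier set rather than a copy (at least in the HBase versions these examples target), so polling also empties the qualifier set held by the Scan itself. A minimal sketch of that side effect (plain JDK collections standing in for the Scan's family map; names are hypothetical):

import java.util.NavigableMap;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;

public class PollFirstMutation {
    public static void main(String[] args) {
        // stand-in for Scan.getFamilyMap(): column family -> qualifier set
        NavigableMap<String, NavigableSet<String>> familyMap = new TreeMap<>();
        NavigableSet<String> qualifiers = new TreeSet<>();
        qualifiers.add("q1");
        familyMap.put("cf", qualifiers);

        String qualifier = qualifiers.isEmpty() ? null : qualifiers.pollFirst();
        System.out.println(qualifier);           // q1
        // the set inside the map was mutated, not a copy
        System.out.println(familyMap.get("cf")); // []
    }
}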

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives a Pair with first object as Sum and second object as row count,
 * computed for a given combination of column qualifier and column family in
 * the given row range as defined in the Scan object. In its current
 * implementation, it takes one column family and one column qualifier (if
 * provided). In case of a null column qualifier, an aggregate sum over the
 * entire column family will be returned.
 * <p>
 * The average is computed in
 * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by
 * processing results from all regions, so it is "ok" to pass the sum and a
 * Long type.
 */
@Override
public void getAvg(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    InternalScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null;
        Long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;

        do {
            results.clear();
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first = ci.getProtoForPromotedType(sumVal).toByteString();
            AggregateResponse.Builder pair = AggregateResponse.newBuilder();
            pair.addFirstPart(first);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives a Pair with first object a List containing Sum and sum of squares,
 * and the second object as row count. It is computed for a given combination of
 * column qualifier and column family in the given row range as defined in the
 * Scan object. In its current implementation, it takes one column family and
 * one column qualifier (if provided). The idea is to get the variance first:
 * the average of the squares less the square of the average; the standard
 * deviation is the square root of the variance.
 */
@Override
public void getStd(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    InternalScanner scanner = null;
    AggregateResponse response = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumSqVal = null, tempVal = null;
        long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();

        boolean hasMoreRows = false;

        do {
            tempVal = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumSqVal = ci.add(sumSqVal, ci.multiply(tempVal, tempVal));
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
            ByteString first_sumSqVal = ci.getProtoForPromotedType(sumSqVal).toByteString();
            AggregateResponse.Builder pair = AggregateResponse.newBuilder();
            pair.addFirstPart(first_sumVal);
            pair.addFirstPart(first_sumSqVal);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
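
For context, the endpoint ships back (sum, sum of squares, row count) and the client combines them; the javadoc's formula, variance = average of the squares less the square of the average, works out as in this hedged sketch (the helper below is illustrative, not part of the HBase API):

public class StdFromParts {
    /** std = sqrt(E[X^2] - E[X]^2), combined from the three parts the endpoint returns */
    static double std(double sum, double sumSq, long rowCount) {
        double avg = sum / rowCount;
        return Math.sqrt(sumSq / rowCount - avg * avg);
    }

    public static void main(String[] args) {
        // e.g. values 1, 2, 3: sum = 6, sumSq = 14, n = 3 -> std ~= 0.8165
        System.out.println(std(6, 14, 3));
    }
}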

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives a List containing sum of values and sum of weights.
 * It is computed for the combination of column
 * family and column qualifier(s) in the given row range as defined in the
 * Scan object. In its current implementation, it takes one column family and
 * two column qualifiers. The first qualifier is for the values column and
 * the second (optional) qualifier is for the weight column.
 */
@Override
public void getMedian(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    InternalScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumWeights = null, tempVal = null, tempWeight = null;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] valQualifier = null, weightQualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            valQualifier = qualifiers.pollFirst();
            // if weighted median is requested, get qualifier for the weight column
            weightQualifier = qualifiers.pollLast();
        }
        List<Cell> results = new ArrayList<Cell>();

        boolean hasMoreRows = false;

        do {
            tempVal = null;
            tempWeight = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv)));
                if (weightQualifier != null) {
                    tempWeight = ci.add(tempWeight,
                            ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv)));
                }
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumWeights = ci.add(sumWeights, tempWeight);
        } while (hasMoreRows);
        ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
        S s = sumWeights == null ? ci.castToReturnType(ci.getMinValue()) : sumWeights;
        ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString();
        AggregateResponse.Builder pair = AggregateResponse.newBuilder();
        pair.addFirstPart(first_sumVal);
        pair.addFirstPart(first_sumWeights);
        response = pair.build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
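
Note how this example pairs pollFirst with pollLast to split a two-element qualifier set into the value and weight columns. Because pollFirst removes the element it returns, a set holding a single qualifier yields null from the subsequent pollLast, which is exactly how the optional weight column stays unset. A minimal sketch (plain JDK, hypothetical qualifier names):

import java.util.NavigableSet;
import java.util.TreeSet;

public class PollFirstPollLast {
    public static void main(String[] args) {
        NavigableSet<String> qualifiers = new TreeSet<>();
        qualifiers.add("value"); // only one qualifier supplied
        String valQualifier = qualifiers.pollFirst();    // "value"
        String weightQualifier = qualifiers.pollLast();  // null -- set is now empty
        System.out.println(valQualifier + " / " + weightQualifier);
    }
}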

From source file:org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java

/**
 * Gives the maximum for a given combination of column qualifier and column
 * family, in the given row range as defined in the Scan object. In its
 * current implementation, it takes one column family and one column qualifier
 * (if provided). In case of a null column qualifier, the maximum value for
 * the entire column family will be returned.
 */
@Override
public void getMax(RpcController controller, SsccTransactionalAggregateRequest request,
        RpcCallback<SsccTransactionalAggregateResponse> done) {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getMax entry");
    RegionScanner scanner = null;
    SsccTransactionalAggregateResponse response = null;
    T max = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        T temp;
        long transactionId = request.getTransactionId();
        long startId = request.getStartId();
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = getScanner(transactionId, startId, scan);
        List<Cell> results = new ArrayList<Cell>();
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        // qualifier can be null.
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                temp = ci.getValue(colFamily, qualifier, kv);
                max = (max == null || (temp != null && ci.compare(temp, max) > 0)) ? temp : max;
            }
            results.clear();
        } while (hasMoreRows);
        if (max != null) {
            SsccTransactionalAggregateResponse.Builder builder = SsccTransactionalAggregateResponse
                    .newBuilder();
            builder.addFirstPart(ci.getProtoForCellType(max).toByteString());
            response = builder.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getMax - Maximum from this region is "
                + env.getRegion().getRegionNameAsString() + ": " + max);
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java

/**
 * Gives the minimum for a given combination of column qualifier and column
 * family, in the given row range as defined in the Scan object. In its
 * current implementation, it takes one column family and one column qualifier
 * (if provided). In case of a null column qualifier, the minimum value for
 * the entire column family will be returned.
 */
@Override
public void getMin(RpcController controller, SsccTransactionalAggregateRequest request,
        RpcCallback<SsccTransactionalAggregateResponse> done) {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getMin entry");
    SsccTransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    T min = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        T temp;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        long startId = request.getStartId();
        scanner = getScanner(transactionId, startId, scan);
        List<Cell> results = new ArrayList<Cell>();
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                temp = ci.getValue(colFamily, qualifier, kv);
                min = (min == null || (temp != null && ci.compare(temp, min) < 0)) ? temp : min;
            }
            results.clear();
        } while (hasMoreRows);
        if (min != null) {
            response = SsccTransactionalAggregateResponse.newBuilder()
                    .addFirstPart(ci.getProtoForCellType(min).toByteString()).build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    if (LOG.isTraceEnabled())
        LOG.trace("Minimum from this region is " + env.getRegion().getRegionNameAsString() + ": " + min);
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java

/**
 * Gives the sum for a given combination of column qualifier and column
 * family, in the given row range as defined in the Scan object. In its
 * current implementation, it takes one column family and one column qualifier
 * (if provided). In case of a null column qualifier, the sum for the entire
 * column family will be returned.
 */
@Override
public void getSum(RpcController controller, SsccTransactionalAggregateRequest request,
        RpcCallback<SsccTransactionalAggregateResponse> done) {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getSum entry");
    SsccTransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    long sum = 0L; // note: never assigned below; the aggregate accumulates in sumVal, so the trace at the end always logs 0
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null;
        T temp;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        long startId = request.getStartId();
        scanner = getScanner(transactionId, startId, scan);
        SsccTransactionState state = this.beginTransIfNotExist(transactionId, startId);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;
        boolean firstCell;
        do {
            hasMoreRows = scanner.next(results);
            firstCell = true;
            Result verResult = null;
            Result statusResult = null;
            Result colResult = null;
            for (Cell c : results) {
                if (firstCell == true) {
                    // compare byte[] contents; != on arrays compares references and is always true here
                    if (!Bytes.equals(CellUtil.cloneFamily(c), DtmConst.TRANSACTION_META_FAMILY)) {
                        //get the statusList
                        Get statusGet = new Get(c.getRow()); //TODO: deprecated API
                        if (LOG.isTraceEnabled())
                            LOG.trace("SsccRegionEndpoint coprocessor: getSum first row:  " + c.getRow());
                        //statusGet.setTimeStamp(startId);
                        statusGet.addColumn(DtmConst.TRANSACTION_META_FAMILY, SsccConst.STATUS_COL);
                        statusGet.setMaxVersions(DtmConst.MAX_VERSION);
                        statusResult = m_Region.get(statusGet);

                        //get the colList
                        Get colGet = new Get(c.getRow()); //TODO: deprecated API
                        //colGet.setTimeStamp(startId);
                        colGet.addColumn(DtmConst.TRANSACTION_META_FAMILY, SsccConst.COLUMNS_COL);
                        colGet.setMaxVersions(DtmConst.MAX_VERSION);
                        colResult = m_Region.get(colGet);

                        //get the versionList
                        Get verGet = new Get(c.getRow());//TODO: deprecated API
                        //verGet.setTimeStamp(startId);
                        verGet.addColumn(DtmConst.TRANSACTION_META_FAMILY, SsccConst.VERSION_COL);
                        verGet.setMaxVersions(DtmConst.MAX_VERSION);
                        verResult = m_Region.get(verGet);
                        firstCell = false;
                    }

                    if (firstCell == false) {

                        temp = ci.getValue(colFamily, qualifier, c);
                        if (temp != null) {
                            if (state.handleResult(c, statusResult.listCells(), verResult.listCells(),
                                    colResult.listCells(), transactionId) == true) {
                                if (LOG.isTraceEnabled())
                                    LOG.trace("SsccRegionEndpoint coprocessor: getSum adding cell: "
                                            + c.getRow());
                                sumVal = ci.add(sumVal, ci.castToReturnType(temp));
                                break;
                            }
                        }
                    }
                }
            }
            results.clear();
        } while (hasMoreRows);
        if (sumVal != null) {
            response = SsccTransactionalAggregateResponse.newBuilder()
                    .addFirstPart(ci.getProtoForPromotedType(sumVal).toByteString()).build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    if (LOG.isTraceEnabled())
        LOG.trace("Sum from this region is " + env.getRegion().getRegionNameAsString() + ": " + sum);
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java

/**
 * Gives the row count for the given column family and column qualifier, in
 * the given row range as defined in the Scan object.
 * @throws IOException
 */
@Override
public void getRowNum(RpcController controller, SsccTransactionalAggregateRequest request,
        RpcCallback<SsccTransactionalAggregateResponse> done) {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getRowNum entry");
    SsccTransactionalAggregateResponse response = null;
    long counter = 0L;
    List<Cell> results = new ArrayList<Cell>();
    RegionScanner scanner = null;
    long transactionId = 0L;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        byte[][] colFamilies = scan.getFamilies();
        byte[] colFamily = colFamilies != null ? colFamilies[0] : null;
        NavigableSet<byte[]> qualifiers = colFamilies != null ? scan.getFamilyMap().get(colFamily) : null;
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        if (scan.getFilter() == null && qualifier == null)
            scan.setFilter(new FirstKeyOnlyFilter());
        transactionId = request.getTransactionId();
        long startId = request.getStartId();
        scanner = getScanner(transactionId, startId, scan);
        SsccTransactionState state = this.beginTransIfNotExist(transactionId, startId);
        boolean hasMoreRows = false;
        boolean firstCell;
        do {
            hasMoreRows = scanner.next(results);
            firstCell = true;
            Result verResult = null;
            Result statusResult = null;
            Result colResult = null;
            for (Cell c : results) {
                if (firstCell == true) {
                    // compare byte[] contents; != on arrays compares references and is always true here
                    if (!Bytes.equals(CellUtil.cloneFamily(c), DtmConst.TRANSACTION_META_FAMILY)) {
                        //get the statusList
                        Get statusGet = new Get(c.getRow()); //TODO: deprecated API
                        if (LOG.isTraceEnabled())
                            LOG.trace("SsccRegionEndpoint coprocessor: getRowNum first row:  " + c.getRow());
                        //statusGet.setTimeStamp(startId);
                        statusGet.addColumn(DtmConst.TRANSACTION_META_FAMILY, SsccConst.STATUS_COL);
                        statusGet.setMaxVersions(DtmConst.MAX_VERSION);
                        statusResult = m_Region.get(statusGet);

                        //get the colList
                        Get colGet = new Get(c.getRow()); //TODO: deprecated API
                        //colGet.setTimeStamp(startId);
                        colGet.addColumn(DtmConst.TRANSACTION_META_FAMILY, SsccConst.COLUMNS_COL);
                        colGet.setMaxVersions(DtmConst.MAX_VERSION);
                        colResult = m_Region.get(colGet);

                        //get the versionList
                        Get verGet = new Get(c.getRow());//TODO: deprecated API
                        //verGet.setTimeStamp(startId);
                        verGet.addColumn(DtmConst.TRANSACTION_META_FAMILY, SsccConst.VERSION_COL);
                        verGet.setMaxVersions(DtmConst.MAX_VERSION);
                        verResult = m_Region.get(verGet);
                        firstCell = false;
                    }

                    if (firstCell == false) {
                        if (state.handleResult(c, statusResult.listCells(), verResult.listCells(),
                                colResult.listCells(), transactionId) == true) {
                            if (LOG.isTraceEnabled())
                                LOG.trace(
                                        "SsccRegionEndpoint coprocessor: getRowNum adding cell: " + c.getRow());
                            counter++;
                            break;
                        }
                    }
                }
            }
            results.clear();
        } while (hasMoreRows);
        ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter);
        bb.rewind();
        response = SsccTransactionalAggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb))
                .build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    if (LOG.isTraceEnabled())
        LOG.trace("Row counter for transactionId " + transactionId + " from this region: "
                + env.getRegion().getRegionNameAsString() + " is " + counter);
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java

/**
 * Gives a Pair with first object as Sum and second object as row count,
 * computed for a given combination of column qualifier and column family in
 * the given row range as defined in the Scan object. In its current
 * implementation, it takes one column family and one column qualifier (if
 * provided). In case of a null column qualifier, an aggregate sum over the
 * entire column family will be returned.
 * <p>
 * The average is computed in
 * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by
 * processing results from all regions, so it is "ok" to pass the sum and a
 * Long type.
 */
@Override
public void getAvg(RpcController controller, SsccTransactionalAggregateRequest request,
        RpcCallback<SsccTransactionalAggregateResponse> done) {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getAvg entry");
    SsccTransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null;
        Long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        long startId = request.getStartId();
        scanner = getScanner(transactionId, startId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;

        do {
            results.clear();
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first = ci.getProtoForPromotedType(sumVal).toByteString();
            SsccTransactionalAggregateResponse.Builder pair = SsccTransactionalAggregateResponse.newBuilder();
            pair.addFirstPart(first);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java

/**
 * Gives a Pair with first object a List containing Sum and sum of squares,
 * and the second object as row count. It is computed for a given combination of
 * column qualifier and column family in the given row range as defined in the
 * Scan object. In its current implementation, it takes one column family and
 * one column qualifier (if provided). The idea is to get the variance first:
 * the average of the squares less the square of the average; the standard
 * deviation is the square root of the variance.
 */
@Override
public void getStd(RpcController controller, SsccTransactionalAggregateRequest request,
        RpcCallback<SsccTransactionalAggregateResponse> done) {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getStd entry");
    RegionScanner scanner = null;
    SsccTransactionalAggregateResponse response = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumSqVal = null, tempVal = null;
        long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        long startId = request.getStartId();
        scanner = getScanner(transactionId, startId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();

        boolean hasMoreRows = false;

        do {
            tempVal = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumSqVal = ci.add(sumSqVal, ci.multiply(tempVal, tempVal));
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
            ByteString first_sumSqVal = ci.getProtoForPromotedType(sumSqVal).toByteString();
            SsccTransactionalAggregateResponse.Builder pair = SsccTransactionalAggregateResponse.newBuilder();
            pair.addFirstPart(first_sumVal);
            pair.addFirstPart(first_sumSqVal);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}