List of usage examples for java.util.NavigableSet.pollFirst
E pollFirst();

Retrieves and removes the first (lowest) element, or returns null if the set is empty.
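A minimal standalone sketch of that contract (class and variable names here are illustrative): pollFirst() removes and returns the lowest element on each call, and returns null once the set is empty.

import java.util.NavigableSet;
import java.util.TreeSet;

public class PollFirstDemo {
    public static void main(String[] args) {
        NavigableSet<String> set = new TreeSet<>();
        set.add("banana");
        set.add("apple");
        set.add("cherry");

        // Elements come out in ascending order; each call removes the element it returns.
        System.out.println(set.pollFirst()); // apple
        System.out.println(set.pollFirst()); // banana
        System.out.println(set.pollFirst()); // cherry

        // On an empty set, pollFirst() returns null rather than throwing.
        System.out.println(set.pollFirst()); // null
    }
}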
From source file: org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java
/**
 * Gives a List containing sum of values and sum of weights.
 * It is computed for the combination of column family and column
 * qualifier(s) in the given row range as defined in the Scan object.
 * In its current implementation, it takes one column family and two
 * column qualifiers. The first qualifier is for the values column and
 * the second qualifier (optional) is for the weight column.
 */
@Override
public void getMedian(RpcController controller, SsccTransactionalAggregateRequest request,
        RpcCallback<SsccTransactionalAggregateResponse> done) {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getMedian entry");
    SsccTransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumWeights = null, tempVal = null, tempWeight = null;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        long startId = request.getStartId();
        scanner = getScanner(transactionId, startId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] valQualifier = null, weightQualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            valQualifier = qualifiers.pollFirst();
            // if weighted median is requested, get qualifier for the weight column
            weightQualifier = qualifiers.pollLast();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;
        do {
            tempVal = null;
            tempWeight = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv)));
                if (weightQualifier != null) {
                    tempWeight = ci.add(tempWeight,
                            ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv)));
                }
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumWeights = ci.add(sumWeights, tempWeight);
        } while (hasMoreRows);
        ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
        S s = sumWeights == null ? ci.castToReturnType(ci.getMinValue()) : sumWeights;
        ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString();
        SsccTransactionalAggregateResponse.Builder pair = SsccTransactionalAggregateResponse.newBuilder();
        pair.addFirstPart(first_sumVal);
        pair.addFirstPart(first_sumWeights);
        response = pair.build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
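Worth noting in the getMedian example above: pollFirst() and pollLast() mutate the qualifier set, so the same two calls work whether the Scan carries one qualifier or two. A standalone sketch of that behavior, using String elements instead of byte[] for brevity (the qualifier names are hypothetical):

import java.util.Arrays;
import java.util.Collections;
import java.util.NavigableSet;
import java.util.TreeSet;

public class QualifierPollSketch {
    public static void main(String[] args) {
        // Two qualifiers: pollFirst() takes the values column, pollLast() the weight column.
        NavigableSet<String> two = new TreeSet<>(Arrays.asList("value", "weight"));
        System.out.println(two.pollFirst() + " / " + two.pollLast()); // value / weight

        // One qualifier: pollFirst() empties the set, so pollLast() returns null,
        // which the getMedian code treats as "no weight column requested".
        NavigableSet<String> one = new TreeSet<>(Collections.singleton("value"));
        System.out.println(one.pollFirst() + " / " + one.pollLast()); // value / null
    }
}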
From source file: org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java
/**
 * Gives the maximum for a given combination of column qualifier and column
 * family, in the given row range as defined in the Scan object. In its
 * current implementation, it takes one column family and one column qualifier
 * (if provided). In case of a null column qualifier, the maximum value for
 * the entire column family will be returned.
 */
@Override
public void getMax(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    RegionScanner scanner = null;
    TransactionalAggregateResponse response = null;
    long transactionId = request.getTransactionId();
    T max = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        T temp;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = getScanner(transactionId, scan);
        List<Cell> results = new ArrayList<Cell>();
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        // qualifier can be null.
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                temp = ci.getValue(colFamily, qualifier, kv);
                max = (max == null || (temp != null && ci.compare(temp, max) > 0)) ? temp : max;
            }
            results.clear();
        } while (hasMoreRows);
        if (max != null) {
            TransactionalAggregateResponse.Builder builder = TransactionalAggregateResponse.newBuilder();
            builder.addFirstPart(ci.getProtoForCellType(max).toByteString());
            response = builder.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    if (LOG.isInfoEnabled())
        LOG.info("TrxRegionEndpoint coprocessor: getMax - txId " + transactionId
                + ", Maximum from this region is " + env.getRegion().getRegionNameAsString() + ": " + max);
    done.run(response);
}
From source file: org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java
/**
 * Gives the minimum for a given combination of column qualifier and column
 * family, in the given row range as defined in the Scan object. In its
 * current implementation, it takes one column family and one column qualifier
 * (if provided). In case of a null column qualifier, the minimum value for
 * the entire column family will be returned.
 */
@Override
public void getMin(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    TransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    long transactionId = request.getTransactionId();
    T min = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        T temp;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = getScanner(transactionId, scan);
        List<Cell> results = new ArrayList<Cell>();
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                temp = ci.getValue(colFamily, qualifier, kv);
                min = (min == null || (temp != null && ci.compare(temp, min) < 0)) ? temp : min;
            }
            results.clear();
        } while (hasMoreRows);
        if (min != null) {
            response = TransactionalAggregateResponse.newBuilder()
                    .addFirstPart(ci.getProtoForCellType(min).toByteString()).build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    if (LOG.isInfoEnabled())
        LOG.info("TrxRegionEndpoint coprocessor: getMin - txId " + transactionId
                + ", Minimum from this region is " + env.getRegion().getRegionNameAsString() + ": " + min);
    done.run(response);
}
From source file: org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java
/**
 * Gives the sum for a given combination of column qualifier and column
 * family, in the given row range as defined in the Scan object. In its
 * current implementation, it takes one column family and one column qualifier
 * (if provided). In case of a null column qualifier, the sum for the entire
 * column family will be returned.
 */
@Override
public void getSum(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    TransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    long transactionId = request.getTransactionId();
    S sumVal = null; // accumulated across all cells; logged after the scan completes
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        T temp;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = getScanner(transactionId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                temp = ci.getValue(colFamily, qualifier, kv);
                if (temp != null)
                    sumVal = ci.add(sumVal, ci.castToReturnType(temp));
            }
            results.clear();
        } while (hasMoreRows);
        if (sumVal != null) {
            response = TransactionalAggregateResponse.newBuilder()
                    .addFirstPart(ci.getProtoForPromotedType(sumVal).toByteString()).build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: getSum - txId " + transactionId
                + ", Sum from this region is " + env.getRegion().getRegionNameAsString() + ": " + sumVal);
    done.run(response);
}
From source file: org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java
/**
 * Gives the row count for the given column family and column qualifier, in
 * the given row range as defined in the Scan object.
 * @throws IOException
 */
@Override
public void getRowNum(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    TransactionalAggregateResponse response = null;
    long counter = 0L;
    List<Cell> results = new ArrayList<Cell>();
    RegionScanner scanner = null;
    long transactionId = 0L;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        byte[][] colFamilies = scan.getFamilies();
        byte[] colFamily = colFamilies != null ? colFamilies[0] : null;
        NavigableSet<byte[]> qualifiers = colFamilies != null ? scan.getFamilyMap().get(colFamily) : null;
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        if (scan.getFilter() == null && qualifier == null)
            scan.setFilter(new FirstKeyOnlyFilter());
        transactionId = request.getTransactionId();
        scanner = getScanner(transactionId, scan);
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            if (results.size() > 0) {
                counter++;
            }
            results.clear();
        } while (hasMoreRows);
        ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter);
        bb.rewind();
        response = TransactionalAggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    if (LOG.isInfoEnabled())
        LOG.info(String.format(
                "Row counter for txId %d from this region: %s is %d, startKey is [%s], endKey is [%s]",
                transactionId, env.getRegion().getRegionNameAsString(), counter,
                env.getRegion().getStartKey() == null ? "null"
                        : Bytes.toStringBinary(env.getRegion().getStartKey()),
                env.getRegion().getEndKey() == null ? "null"
                        : Bytes.toStringBinary(env.getRegion().getEndKey())));
    done.run(response);
}
From source file: org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java
/**
 * Gives a Pair with the first object as the Sum and the second object as the
 * row count, computed for a given combination of column qualifier and column
 * family in the given row range as defined in the Scan object. In its current
 * implementation, it takes one column family and one column qualifier (if
 * provided). In case of a null column qualifier, an aggregate sum over the
 * entire column family will be returned.
 * <p>
 * The average is computed in
 * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by processing
 * results from all regions, so it's "ok" to pass the sum and a Long type.
 */
@Override
public void getAvg(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    TransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null;
        Long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        scanner = getScanner(transactionId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;
        do {
            results.clear();
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first = ci.getProtoForPromotedType(sumVal).toByteString();
            TransactionalAggregateResponse.Builder pair = TransactionalAggregateResponse.newBuilder();
            pair.addFirstPart(first);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
From source file: org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java
/**
 * Gives a Pair with the first object a List containing the Sum and the sum of
 * squares, and the second object the row count. It is computed for a given
 * combination of column qualifier and column family in the given row range as
 * defined in the Scan object. In its current implementation, it takes one
 * column family and one column qualifier (if provided). The idea is to get
 * the value of the variance first: the average of the squares less the square
 * of the average; the standard deviation is the square root of the variance.
 */
@Override
public void getStd(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    RegionScanner scanner = null;
    TransactionalAggregateResponse response = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumSqVal = null, tempVal = null;
        long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        scanner = getScanner(transactionId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;
        do {
            tempVal = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumSqVal = ci.add(sumSqVal, ci.multiply(tempVal, tempVal));
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
            ByteString first_sumSqVal = ci.getProtoForPromotedType(sumSqVal).toByteString();
            TransactionalAggregateResponse.Builder pair = TransactionalAggregateResponse.newBuilder();
            pair.addFirstPart(first_sumVal);
            pair.addFirstPart(first_sumSqVal);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
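The formula in the getStd javadoc can be made concrete. Below is a hedged sketch of the client-side combine step (the helper is hypothetical, and assumes the partials have been promoted to double): variance = sumSq/n - (sum/n)^2, and the standard deviation is its square root.

public class StdFromPartials {
    // Combine per-region partials (sum, sum of squares, row count) into a
    // population standard deviation.
    static double std(double sum, double sumSq, long n) {
        double avg = sum / n;
        double variance = sumSq / n - avg * avg;
        return Math.sqrt(variance);
    }

    public static void main(String[] args) {
        // Values 1, 2, 3, 4: sum = 10, sum of squares = 30, n = 4.
        System.out.println(std(10, 30, 4)); // 1.118... (variance 1.25)
    }
}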
From source file: org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java
/**
 * Gives a List containing sum of values and sum of weights.
 * It is computed for the combination of column family and column
 * qualifier(s) in the given row range as defined in the Scan object.
 * In its current implementation, it takes one column family and two
 * column qualifiers. The first qualifier is for the values column and
 * the second qualifier (optional) is for the weight column.
 */
@Override
public void getMedian(RpcController controller, TransactionalAggregateRequest request,
        RpcCallback<TransactionalAggregateResponse> done) {
    TransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumWeights = null, tempVal = null, tempWeight = null;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        scanner = getScanner(transactionId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] valQualifier = null, weightQualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            valQualifier = qualifiers.pollFirst();
            // if weighted median is requested, get qualifier for the weight column
            weightQualifier = qualifiers.pollLast();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;
        do {
            tempVal = null;
            tempWeight = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv)));
                if (weightQualifier != null) {
                    tempWeight = ci.add(tempWeight,
                            ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv)));
                }
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumWeights = ci.add(sumWeights, tempWeight);
        } while (hasMoreRows);
        ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
        S s = sumWeights == null ? ci.castToReturnType(ci.getMinValue()) : sumWeights;
        ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString();
        TransactionalAggregateResponse.Builder pair = TransactionalAggregateResponse.newBuilder();
        pair.addFirstPart(first_sumVal);
        pair.addFirstPart(first_sumWeights);
        response = pair.build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
From source file: org.apache.kylin.metadata.badquery.BadQueryHistoryManager.java
public BadQueryHistory addEntryToProject(BadQueryEntry badQueryEntry, String project) throws IOException {
    if (StringUtils.isEmpty(project) || badQueryEntry.getAdj() == null || badQueryEntry.getSql() == null)
        throw new IllegalArgumentException();

    BadQueryHistory badQueryHistory = getBadQueriesForProject(project);
    NavigableSet<BadQueryEntry> entries = badQueryHistory.getEntries();
    if (entries.size() >= kylinConfig.getBadQueryHistoryNum()) {
        // History is full: evict the first (lowest-ordered) entry to keep the set bounded.
        entries.pollFirst();
    }
    entries.add(badQueryEntry);

    getStore().putResource(badQueryHistory.getResourcePath(), badQueryHistory, BAD_QUERY_INSTANCE_SERIALIZER);
    return badQueryHistory;
}
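The Kylin method above uses pollFirst() as an eviction step: once the history reaches the configured limit, the first (lowest-ordered) entry is removed before the new one is added, keeping the set bounded. A minimal sketch of the same pattern with integers and a hypothetical capacity constant standing in for kylinConfig.getBadQueryHistoryNum():

import java.util.NavigableSet;
import java.util.TreeSet;

public class BoundedSetSketch {
    private static final int CAPACITY = 3; // hypothetical limit

    public static void main(String[] args) {
        NavigableSet<Integer> entries = new TreeSet<>();
        for (int i = 1; i <= 5; i++) {
            if (entries.size() >= CAPACITY) {
                entries.pollFirst(); // drop the smallest element to make room
            }
            entries.add(i);
        }
        System.out.println(entries); // [3, 4, 5]
    }
}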