Example usage for java.nio ByteBuffer getLong

Introduction

This page collects example usages of java.nio.ByteBuffer.getLong().

Prototype

public abstract long getLong();

Document

Returns the long at the current position and increases the position by 8.
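To illustrate that contract before the real-world examples, here is a minimal, self-contained sketch (not taken from the sources below): two longs are written, the buffer is flipped for reading, and each getLong() call advances the position by 8.

import java.nio.ByteBuffer;

public class GetLongDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putLong(42L).putLong(7L);
        buf.flip();                                 // switch from writing to reading
        long first = buf.getLong();                 // reads bytes 0-7, position -> 8
        long second = buf.getLong();                // reads bytes 8-15, position -> 16
        System.out.println(first + " " + second);   // prints: 42 7
    }
}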

Usage

From source file:org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient.java

/**
 * Computes the average while fetching the sum and row count from all the
 * corresponding regions. The approach is to compute a global sum of the
 * region-level sums and row counts and then compute the average.
 * @param table
 * @param scan
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(
        final long transactionId, final TransactionalTable table, final ColumnInterpreter<R, S, P, Q, T> ci,
        final Scan scan) throws Throwable {
    byte[] currentBeginKey = scan.getStartRow();
    HRegionInfo currentRegion = table.getRegionLocation(currentBeginKey).getRegionInfo();
    com.google.protobuf.ByteString regionName = ByteString.copyFromUtf8(currentRegion.getRegionNameAsString());
    final TransactionalAggregateRequest requestArg = validateArgAndGetPB(regionName, transactionId, scan, ci,
            false);
    class AvgCallBack implements Batch.Callback<Pair<S, Long>> {
        S sum = null;
        Long rowCount = 0L;

        public Pair<S, Long> getAvgArgs() {
            return new Pair<S, Long>(sum, rowCount);
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, Pair<S, Long> result) {
            sum = ci.add(sum, result.getFirst());
            rowCount += result.getSecond();
        }
    }
    AvgCallBack avgCallBack = new AvgCallBack();
    table.coprocessorService(TrxRegionService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TrxRegionService, Pair<S, Long>>() {
                @Override
                public Pair<S, Long> call(TrxRegionService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TransactionalAggregateResponse> rpcCallback = new BlockingRpcCallback<TransactionalAggregateResponse>();
                    instance.getAvg(controller, requestArg, rpcCallback);
                    TransactionalAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    Pair<S, Long> pair = new Pair<S, Long>(null, 0L);
                    if (response.getFirstPartCount() == 0) {
                        return pair;
                    }
                    ByteString b = response.getFirstPart(0);
                    T t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    S s = ci.getPromotedValueFromProto(t);
                    pair.setFirst(s);
                    ByteBuffer bb = ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart()));
                    bb.rewind(); // reset position to 0 before reading back
                    pair.setSecond(bb.getLong()); // the second part carries the 8-byte row count
                    return pair;
                }
            }, avgCallBack);
    return avgCallBack.getAvgArgs();
}
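The allocate/put/rewind/getLong sequence above recurs in several snippets below: the response's second part is copied into a fresh 8-byte buffer, the position is rewound to 0, and the value is read back as a long. Assuming getBytesFromResponse returns exactly 8 bytes, an equivalent shortcut would be:

long rowCount = ByteBuffer.wrap(getBytesFromResponse(response.getSecondPart())).getLong();
// wrap() leaves the position at 0, so no rewind is needed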

From source file:org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient.java

/**
 * Computes a global standard deviation for a given column and its value.
 * The standard deviation is the square root of (average of squares -
 * average * average). From individual regions it obtains the sum, the sum of
 * squares, and the row count; these are then combined into the global
 * standard deviation.
 * @param table
 * @param scan
 * @return standard deviations
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<List<S>, Long> getStdArgs(
        final long transactionId, final TransactionalTable table, final ColumnInterpreter<R, S, P, Q, T> ci,
        final Scan scan) throws Throwable {
    byte[] currentBeginKey = scan.getStartRow();
    HRegionInfo currentRegion = table.getRegionLocation(currentBeginKey).getRegionInfo();
    com.google.protobuf.ByteString regionName = ByteString.copyFromUtf8(currentRegion.getRegionNameAsString());
    final TransactionalAggregateRequest requestArg = validateArgAndGetPB(regionName, transactionId, scan, ci,
            false);
    class StdCallback implements Batch.Callback<Pair<List<S>, Long>> {
        long rowCountVal = 0L;
        S sumVal = null, sumSqVal = null;

        public Pair<List<S>, Long> getStdParams() {
            List<S> l = new ArrayList<S>();
            l.add(sumVal);
            l.add(sumSqVal);
            Pair<List<S>, Long> p = new Pair<List<S>, Long>(l, rowCountVal);
            return p;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, Pair<List<S>, Long> result) {
            if (result.getFirst().size() > 0) {
                sumVal = ci.add(sumVal, result.getFirst().get(0));
                sumSqVal = ci.add(sumSqVal, result.getFirst().get(1));
                rowCountVal += result.getSecond();
            }
        }
    }
    StdCallback stdCallback = new StdCallback();
    table.coprocessorService(TrxRegionService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TrxRegionService, Pair<List<S>, Long>>() {
                @Override
                public Pair<List<S>, Long> call(TrxRegionService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TransactionalAggregateResponse> rpcCallback = new BlockingRpcCallback<TransactionalAggregateResponse>();
                    instance.getStd(controller, requestArg, rpcCallback);
                    TransactionalAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    Pair<List<S>, Long> pair = new Pair<List<S>, Long>(new ArrayList<S>(), 0L);
                    if (response.getFirstPartCount() == 0) {
                        return pair;
                    }
                    List<S> list = new ArrayList<S>();
                    for (int i = 0; i < response.getFirstPartCount(); i++) {
                        ByteString b = response.getFirstPart(i);
                        T t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                        S s = ci.getPromotedValueFromProto(t);
                        list.add(s);
                    }
                    pair.setFirst(list);
                    ByteBuffer bb = ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart()));
                    bb.rewind();
                    pair.setSecond(bb.getLong());
                    return pair;
                }
            }, stdCallback);
    return stdCallback.getStdParams();
}
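The caller is expected to turn these three aggregates into the standard deviation. A hedged sketch of that final step, following the formula in the comment above and assuming the divideForAvg helper that HBase's ColumnInterpreter exposes:

Pair<List<S>, Long> p = getStdArgs(transactionId, table, ci, scan);
double avgOfSumSq = ci.divideForAvg(p.getFirst().get(1), p.getSecond()); // average of squares
double avg = ci.divideForAvg(p.getFirst().get(0), p.getSecond());        // plain average
double std = Math.sqrt(avgOfSumSq - avg * avg);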

From source file:org.opendaylight.lispflowmapping.lisp.serializer.MapRequestSerializer.java

public MapRequest deserialize(ByteBuffer requestBuffer) {
    try {
        MapRequestBuilder builder = new MapRequestBuilder();

        byte typeAndFlags = requestBuffer.get();
        builder.setAuthoritative(ByteUtil.extractBit(typeAndFlags, Flags.AUTHORITATIVE));
        builder.setMapDataPresent(ByteUtil.extractBit(typeAndFlags, Flags.MAP_DATA_PRESENT));
        builder.setProbe(ByteUtil.extractBit(typeAndFlags, Flags.PROBE));
        builder.setSmr(ByteUtil.extractBit(typeAndFlags, Flags.SMR));

        byte moreFlags = requestBuffer.get();
        builder.setPitr(ByteUtil.extractBit(moreFlags, Flags.PITR));
        builder.setSmrInvoked(ByteUtil.extractBit(moreFlags, Flags.SMR_INVOKED));

        int itrCount = ByteUtil.getUnsignedByte(requestBuffer) + 1;
        int recordCount = ByteUtil.getUnsignedByte(requestBuffer);
        builder.setNonce(requestBuffer.getLong()); // the 64-bit nonce is consumed with a single getLong()
        LispAddressSerializerContext ctx = new LispAddressSerializerContext(
                LispAddressSerializerContext.MASK_LEN_MISSING);
        builder.setSourceEid(new SourceEidBuilder()
                .setEid(LispAddressSerializer.getInstance().deserializeEid(requestBuffer, ctx)).build());

        if (builder.getItrRloc() == null) {
            builder.setItrRloc(new ArrayList<ItrRloc>());
        }
        for (int i = 0; i < itrCount; i++) {
            builder.getItrRloc().add(new ItrRlocBuilder()
                    .setRloc(LispAddressSerializer.getInstance().deserializeRloc(requestBuffer)).build());
        }

        if (builder.getEidItem() == null) {
            builder.setEidItem(new ArrayList<EidItem>());
        }
        for (int i = 0; i < recordCount; i++) {
            builder.getEidItem().add(new EidItemBuilder()
                    .setEid(EidRecordSerializer.getInstance().deserialize(requestBuffer)).build());
        }
        if (builder.isMapDataPresent() && requestBuffer.hasRemaining()) {
            try {
                builder.setMapReply(
                        new org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.proto.rev151105.maprequest.MapReplyBuilder()
                                .setMappingRecord(
                                        MappingRecordSerializer.getInstance().deserialize(requestBuffer))
                                .build()); // the original chained a second, discarded build() here
            } catch (RuntimeException re) {
                LOG.warn("Couldn't deserialize Map-Reply encapsulated in Map-Request", re);
            }
        }
        return builder.build();
    } catch (RuntimeException re) {
        throw new LispSerializationException(
                "Couldn't deserialize Map-Request (len=" + requestBuffer.capacity() + ")", re);
    }
}

From source file:org.opendaylight.lispflowmapping.implementation.serializer.MapRequestSerializer.java

public MapRequest deserialize(ByteBuffer requestBuffer) {
    try {
        MapRequestBuilder builder = new MapRequestBuilder();

        byte typeAndFlags = requestBuffer.get();
        builder.setAuthoritative(ByteUtil.extractBit(typeAndFlags, Flags.AUTHORITATIVE));
        builder.setMapDataPresent(ByteUtil.extractBit(typeAndFlags, Flags.MAP_DATA_PRESENT));
        builder.setProbe(ByteUtil.extractBit(typeAndFlags, Flags.PROBE));
        builder.setSmr(ByteUtil.extractBit(typeAndFlags, Flags.SMR));

        byte moreFlags = requestBuffer.get();
        builder.setPitr(ByteUtil.extractBit(moreFlags, Flags.PITR));
        builder.setSmrInvoked(ByteUtil.extractBit(moreFlags, Flags.SMR_INVOKED));

        int itrCount = ByteUtil.getUnsignedByte(requestBuffer) + 1;
        int recordCount = ByteUtil.getUnsignedByte(requestBuffer);
        builder.setNonce(requestBuffer.getLong());
        builder.setSourceEid(
                new SourceEidBuilder()
                        .setLispAddressContainer(LispAFIConvertor
                                .toContainer(LispAddressSerializer.getInstance().deserialize(requestBuffer)))
                        .build());

        if (builder.getItrRloc() == null) {
            builder.setItrRloc(new ArrayList<ItrRloc>());
        }
        for (int i = 0; i < itrCount; i++) {
            builder.getItrRloc()
                    .add(new ItrRlocBuilder().setLispAddressContainer(LispAFIConvertor
                            .toContainer(LispAddressSerializer.getInstance().deserialize(requestBuffer)))
                            .build());
        }

        if (builder.getEidRecord() == null) {
            builder.setEidRecord(new ArrayList<EidRecord>());
        }
        for (int i = 0; i < recordCount; i++) {
            builder.getEidRecord().add(EidRecordSerializer.getInstance().deserialize(requestBuffer));
        }
        if (builder.isMapDataPresent() && requestBuffer.hasRemaining()) {
            try {
                builder.setMapReply(
                        new org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.control.plane.rev150314.maprequest.MapReplyBuilder(
                                new EidToLocatorRecordBuilder(
                                        EidToLocatorRecordSerializer.getInstance().deserialize(requestBuffer))
                                                .build()).build());
            } catch (RuntimeException re) {
                LOG.warn("couldn't deserialize map reply encapsulated in map request. {}", re.getMessage());
            }
        }
        return builder.build();
    } catch (RuntimeException re) {
        throw new LispSerializationException(
                "Couldn't deserialize Map-Request (len=" + requestBuffer.capacity() + ")", re);
    }
}

From source file:edu.umn.cs.spatialHadoop.nasa.HDFRasterLayer.java

@Override
public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    this.timestamp = in.readLong();
    int length = in.readInt();
    byte[] serializedData = new byte[length];
    in.readFully(serializedData);
    ByteArrayInputStream bais = new ByteArrayInputStream(serializedData);
    GZIPInputStream gzis = new GZIPInputStream(bais);

    byte[] buffer = new byte[8];
    int headerRead = 0;
    while (headerRead < buffer.length) { // gzis.read may return fewer bytes than requested
        headerRead += gzis.read(buffer, headerRead, buffer.length - headerRead);
    }
    ByteBuffer bbuffer = ByteBuffer.wrap(buffer);
    int width = bbuffer.getInt();
    int height = bbuffer.getInt();
    // Reallocate memory only if needed
    if (width != this.getWidth() || height != this.getHeight()) {
        sum = new long[width][height];
        count = new long[width][height];
    }
    buffer = new byte[getHeight() * 2 * 8]; // one column: an 8-byte sum and an 8-byte count per cell
    for (int x = 0; x < getWidth(); x++) {
        int size = 0;
        while (size < buffer.length) {
            size += gzis.read(buffer, size, buffer.length - size);
        }
        bbuffer = ByteBuffer.wrap(buffer);
        for (int y = 0; y < getHeight(); y++) {
            sum[x][y] = bbuffer.getLong();
            count[x][y] = bbuffer.getLong();
        }
    }
}
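For reference, a matching writer would emit the same layout: two ints for the dimensions, then, per column, an interleaved (sum, count) pair of longs for each cell. A hedged sketch of the column loop on the write side (gzos is an assumed GZIPOutputStream; this is inferred from the reads above, not taken from the source):

ByteBuffer col = ByteBuffer.allocate(getHeight() * 2 * 8);
for (int y = 0; y < getHeight(); y++) {
    col.putLong(sum[x][y]);   // 8 bytes of sum
    col.putLong(count[x][y]); // 8 bytes of count
}
gzos.write(col.array());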

From source file:org.apache.hadoop.hbase.client.coprocessor.TimeseriesAggregationClient.java

/**
 * Computes the average while fetching the sum and row count from all the corresponding regions.
 * The approach is to compute a global sum of the region-level sums and row counts and then
 * compute the average.
 * @param table
 * @param scan
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs(
        final HTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class AvgCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, Pair<S, Long>> averages = new ConcurrentSkipListMap<Long, Pair<S, Long>>();

        public synchronized ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs() {
            return averages;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {

                if (entry.getValue().getFirstPartCount() == 0) {
                    if (!averages.containsKey(entry.getKey())) {
                        averages.put(entry.getKey(), new Pair<S, Long>(null, 0L));
                    }
                } else {

                    ByteString b = entry.getValue().getFirstPart(0);
                    T t = null;
                    try {
                        t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    } catch (IOException e) {
                        e.printStackTrace(); // note: t stays null when parsing fails
                    }
                    S s = ci.getPromotedValueFromProto(t);

                    ByteBuffer bb = ByteBuffer.allocate(8)
                            .put(getBytesFromResponse(entry.getValue().getSecondPart()));
                    bb.rewind();

                    if (averages.containsKey(entry.getKey())) {
                        S sum = averages.get(entry.getKey()).getFirst();
                        Long rowCount = averages.get(entry.getKey()).getSecond();
                        averages.put(entry.getKey(),
                                new Pair<S, Long>(ci.add(sum, s), rowCount + bb.getLong()));
                    } else {
                        averages.put(entry.getKey(), new Pair<S, Long>(s, bb.getLong()));
                    }
                }
            }
        }
    }
    AvgCallBack avgCallBack = new AvgCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getAvg(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    return response;
                }
            }, avgCallBack);
    return avgCallBack.getAvgArgs();
}

From source file:org.opendaylight.lispflowmapping.lisp.serializer.MapRegisterSerializer.java

public MapRegister deserialize(ByteBuffer registerBuffer, InetAddress sourceRloc) {
    try {
        MapRegisterBuilder builder = new MapRegisterBuilder();
        builder.setMappingRecordItem(new ArrayList<MappingRecordItem>());

        byte typeAndFlags = registerBuffer.get();
        boolean xtrSiteIdPresent = ByteUtil.extractBit(typeAndFlags, Flags.XTRSITEID);
        builder.setProxyMapReply(ByteUtil.extractBit(typeAndFlags, Flags.PROXY));
        builder.setXtrSiteIdPresent(xtrSiteIdPresent);

        registerBuffer.position(registerBuffer.position() + Length.RES);
        byte mergeAndMapReply = registerBuffer.get();
        builder.setWantMapNotify(ByteUtil.extractBit(mergeAndMapReply, Flags.WANT_MAP_NOTIFY));
        builder.setMergeEnabled(ByteUtil.extractBit(mergeAndMapReply, Flags.MERGE_ENABLED));
        byte recordCount = (byte) ByteUtil.getUnsignedByte(registerBuffer);
        builder.setNonce(registerBuffer.getLong()); // 64-bit nonce, read in one call
        builder.setKeyId(registerBuffer.getShort());
        short authenticationLength = registerBuffer.getShort();
        byte[] authenticationData = new byte[authenticationLength];
        registerBuffer.get(authenticationData);
        builder.setAuthenticationData(authenticationData);

        if (xtrSiteIdPresent) {
            List<MappingRecordBuilder> mrbs = new ArrayList<MappingRecordBuilder>();
            for (int i = 0; i < recordCount; i++) {
                mrbs.add(MappingRecordSerializer.getInstance().deserializeToBuilder(registerBuffer));
            }
            byte[] xtrIdBuf = new byte[Length.XTRID_SIZE];
            registerBuffer.get(xtrIdBuf);
            XtrId xtrId = new XtrId(xtrIdBuf);
            byte[] siteIdBuf = new byte[Length.SITEID_SIZE];
            registerBuffer.get(siteIdBuf);
            SiteId siteId = new SiteId(siteIdBuf);
            builder.setXtrId(xtrId);
            builder.setSiteId(siteId);
            for (MappingRecordBuilder mrb : mrbs) {
                mrb.setXtrId(xtrId);
                mrb.setSiteId(siteId);
                mrb.setSourceRloc(getSourceRloc(sourceRloc));
                builder.getMappingRecordItem()
                        .add(new MappingRecordItemBuilder().setMappingRecord(mrb.build()).build());
            }
        } else {
            for (int i = 0; i < recordCount; i++) {
                builder.getMappingRecordItem()
                        .add(new MappingRecordItemBuilder()
                                .setMappingRecord(
                                        MappingRecordSerializer.getInstance().deserialize(registerBuffer))
                                .build());
            }
        }

        registerBuffer.limit(registerBuffer.position());
        byte[] mapRegisterBytes = new byte[registerBuffer.position()];
        registerBuffer.position(0);
        registerBuffer.get(mapRegisterBytes); // re-reads the consumed bytes; the array is not used afterwards
        return builder.build();
    } catch (RuntimeException re) {
        throw new LispSerializationException(
                "Couldn't deserialize Map-Register (len=" + registerBuffer.capacity() + ")", re);
    }

}

From source file:voldemort.store.cachestore.impl.ChannelStore.java

private void init(boolean reset) throws IOException {
    if (reset) {
        indexChannel.truncate(OFFSET);
        dataChannel.truncate(OFFSET);
        keyChannel.truncate(OFFSET);
        totalRecord = 0;
    } else {
        long length = indexChannel.size() - OFFSET;
        totalRecord = (int) (length / RECORD_SIZE);
        ByteBuffer buf = ByteBuffer.allocate(RECORD_SIZE);
        logger.info("Building key map and read index file for " + filename + " total record " + totalRecord);
        long per = 0;
        int j = 0;
        if (totalRecord >= 1000000)
            per = totalRecord / 10;

        for (int i = 0; i < totalRecord; i++) {
            indexChannel.read(buf);
            assert (buf.capacity() == RECORD_SIZE);
            buf.rewind();
            byte status = buf.get(); // 1-byte status flag
            if (isDeleted(status))
                this.deleted++;
            else {
                long key = buf.getLong(); // first long: location of the key bytes in keyChannel
                byte[] keys;
                try {
                    keys = readChannel(key, keyChannel);
                    long data = buf.getLong(); // second long: data location
                    long block2version = buf.getLong(); // third long: block/version word for CacheBlock
                    CacheBlock block = new CacheBlock(i, data, block2version, status);
                    map.put(toKey(keys), block);
                } catch (Exception ex) {
                    logger.warn("Not able to read record no " + i + " , skip reason " + ex.getMessage());
                    buf.clear();
                    error++;
                    continue;
                }
            }
            buf.clear();
            if (per > 0 && (i + 1) % per == 0) {
                logger.info((++j * 10) + "% complete");
            }
        }
    }
    dataOffset = dataChannel.size();
    keyOffset = keyChannel.size();
    //logOffset = logChannel.size();
    logger.info("Total record " + totalRecord + " deleted " + deleted + " error " + error + " active "
            + (totalRecord - deleted - error));
}
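From the reads above, each index record appears to be one status byte followed by three longs, which would make RECORD_SIZE 25 bytes (assuming no padding). A hedged sketch of writing one such record; the field names here are inferred, not taken from the source:

ByteBuffer rec = ByteBuffer.allocate(1 + 3 * 8); // status + key + data + block2version
rec.put(status).putLong(keyPosition).putLong(dataPosition).putLong(block2version);
rec.flip();
indexChannel.write(rec);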

From source file:org.apache.hadoop.hbase.coprocessor.client.TimeseriesAggregationClient.java

/**
 * Computes the average while fetching the sum and row count from all the corresponding regions.
 * The approach is to compute a global sum of the region-level sums and row counts and then
 * compute the average.
 * @param table
 * @param scan
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs(
        final Table table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class AvgCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, Pair<S, Long>> averages = new ConcurrentSkipListMap<Long, Pair<S, Long>>();

        public synchronized ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs() {
            return averages;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {

                if (entry.getValue().getFirstPartCount() == 0) {
                    if (!averages.containsKey(entry.getKey())) {
                        averages.put(entry.getKey(), new Pair<S, Long>(null, 0L));
                    }
                } else {

                    ByteString b = entry.getValue().getFirstPart(0);
                    T t = null;
                    try {
                        t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    } catch (IOException e) {
                        e.printStackTrace(); // note: t stays null when parsing fails
                    }
                    S s = ci.getPromotedValueFromProto(t);

                    ByteBuffer bb = ByteBuffer.allocate(8)
                            .put(getBytesFromResponse(entry.getValue().getSecondPart()));
                    bb.rewind();

                    if (averages.containsKey(entry.getKey())) {
                        S sum = averages.get(entry.getKey()).getFirst();
                        Long rowCount = averages.get(entry.getKey()).getSecond();
                        averages.put(entry.getKey(),
                                new Pair<S, Long>(ci.add(sum, s), rowCount + bb.getLong()));
                    } else {
                        averages.put(entry.getKey(), new Pair<S, Long>(s, bb.getLong()));
                    }
                }
            }
        }
    }
    AvgCallBack avgCallBack = new AvgCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getAvg(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    return response;
                }
            }, avgCallBack);
    return avgCallBack.getAvgArgs();
}

From source file:com.yobidrive.diskmap.needles.Needle.java

public boolean getNeedleHeaderFromBuffer(ByteBuffer input) throws Exception {
    try {
        // Reinit needle
        keyBytes = null;
        version = null;
        flags = 0x00;
        size = 0;
        data = null;
        previousNeedle = null; // Chaining
        readBytes = 0;
        // Processes reading
        input.rewind();
        int startPosition = input.position();
        int magic = input.getInt();
        if (magic == MAGICSTART_BADENDIAN) {
            // the marker matched byte-swapped: the writer used the opposite byte order
            if (input.order().equals(ByteOrder.BIG_ENDIAN))
                input.order(ByteOrder.LITTLE_ENDIAN);
            else
                input.order(ByteOrder.BIG_ENDIAN);
        } else if (magic != MAGICSTART) {
            logger.error("Buffer not starting with needle");
            return false;
        }
        needleNumber = input.getLong();
        flags = input.get();
        int keyLen = input.getInt();
        if (keyLen > 2028) {
            logger.error("Crazy needle key len");
            return false;
        }
        keyBytes = new byte[keyLen];
        input.get(keyBytes);
        int versionLen = input.getInt();
        if (versionLen > 1024 * 16) {
            logger.error("Crazy needle version len");
            return false;
        }
        if (versionLen == 0)
            version = null;
        else {
            byte[] versionBytes = new byte[versionLen];
            input.get(versionBytes);
            version = new VectorClock(versionBytes);
        }
        int previousLogNumber = input.getInt(); // Chaining
        long previousNeedleOffset = input.getLong(); // Chaining
        if (previousLogNumber != -1 && previousNeedleOffset != -1L) {
            previousNeedle = new NeedlePointer();
            previousNeedle.setNeedleFileNumber(previousLogNumber);
            previousNeedle.setNeedleOffset(previousNeedleOffset);
        }
        originalFileNumber = input.getInt(); // Original needle location (for cleaning)
        originalSize = input.getInt(); // Original needle size (for cleaning)
        size = input.getInt();

        readBytes = input.position() - startPosition;
        input.rewind();
        // input.mark() ;
        return true;
    } catch (BufferUnderflowException bue) {
        return false;
    }
}
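The magic-number check at the top of this method is a compact endianness probe: if the marker reads back byte-swapped, the buffer's byte order is flipped before any further getInt()/getLong() calls. A minimal sketch of the same idea, with a hypothetical MAGIC constant:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

final class EndianProbe {
    static final int MAGIC = 0xCAFED00D; // hypothetical marker value
    static final int MAGIC_SWAPPED = Integer.reverseBytes(MAGIC);

    /** Reads the 4-byte marker and flips buf's byte order if it arrived swapped. */
    static boolean adjustByteOrder(ByteBuffer buf) {
        int magic = buf.getInt();
        if (magic == MAGIC)
            return true; // order already matches the writer's
        if (magic == MAGIC_SWAPPED) {
            buf.order(buf.order() == ByteOrder.BIG_ENDIAN ? ByteOrder.LITTLE_ENDIAN
                                                          : ByteOrder.BIG_ENDIAN);
            return true;
        }
        return false; // not positioned at a valid record
    }
}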