List of usage examples for java.nio.ByteBuffer.getLong()

public abstract long getLong();

Relative get method: reads the next eight bytes at the buffer's current position, composes them into a long according to the buffer's byte order (big-endian by default), and advances the position by eight.
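Before the real-world examples below, a minimal self-contained sketch of the relative getLong() call (the value written here is arbitrary, chosen only for illustration):

import java.nio.ByteBuffer;

public class GetLongDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(Long.BYTES);
        buf.putLong(0x1122334455667788L); // write 8 bytes
        buf.flip();                       // switch to read mode: position=0, limit=8
        long value = buf.getLong();       // read 8 bytes; position advances to 8
        System.out.println(Long.toHexString(value)); // prints 1122334455667788
    }
}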
From source file:org.opendaylight.lispflowmapping.implementation.serializer.MapRegisterSerializer.java
public MapRegister deserialize(ByteBuffer registerBuffer) {
    try {
        MapRegisterBuilder builder = new MapRegisterBuilder();
        builder.setEidToLocatorRecord(new ArrayList<EidToLocatorRecord>());

        byte typeAndFlags = registerBuffer.get();
        boolean xtrSiteIdPresent = ByteUtil.extractBit(typeAndFlags, Flags.XTRSITEID);
        builder.setProxyMapReply(ByteUtil.extractBit(typeAndFlags, Flags.PROXY));
        builder.setXtrSiteIdPresent(xtrSiteIdPresent);

        registerBuffer.position(registerBuffer.position() + Length.RES);
        builder.setWantMapNotify(ByteUtil.extractBit(registerBuffer.get(), Flags.WANT_MAP_REPLY));
        byte recordCount = (byte) ByteUtil.getUnsignedByte(registerBuffer);
        builder.setNonce(registerBuffer.getLong());
        builder.setKeyId(registerBuffer.getShort());
        short authenticationLength = registerBuffer.getShort();
        byte[] authenticationData = new byte[authenticationLength];
        registerBuffer.get(authenticationData);
        builder.setAuthenticationData(authenticationData);

        for (int i = 0; i < recordCount; i++) {
            builder.getEidToLocatorRecord().add(new EidToLocatorRecordBuilder(
                    EidToLocatorRecordSerializer.getInstance().deserialize(registerBuffer)).build());
        }

        if (xtrSiteIdPresent) {
            byte[] xtrId = new byte[Length.XTRID_SIZE];
            registerBuffer.get(xtrId);
            byte[] siteId = new byte[Length.SITEID_SIZE];
            registerBuffer.get(siteId);
            builder.setXtrId(xtrId);
            builder.setSiteId(siteId);
        }

        registerBuffer.limit(registerBuffer.position());
        byte[] mapRegisterBytes = new byte[registerBuffer.position()];
        registerBuffer.position(0);
        registerBuffer.get(mapRegisterBytes);
        return builder.build();
    } catch (RuntimeException re) {
        throw new LispSerializationException(
                "Couldn't deserialize Map-Register (len=" + registerBuffer.capacity() + ")", re);
    }
}
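One reason getLong() can be applied directly to wire formats like the Map-Register above is that ByteBuffer defaults to big-endian, i.e. network byte order. A small sketch (the byte values are made up):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class EndianDemo {
    public static void main(String[] args) {
        byte[] wire = {0, 0, 0, 0, 0, 0, 0, 42}; // hypothetical 8-byte field off the wire
        long bigEndian = ByteBuffer.wrap(wire).getLong();  // 42 (default, network order)
        long littleEndian = ByteBuffer.wrap(wire)
                .order(ByteOrder.LITTLE_ENDIAN).getLong(); // 42L << 56
        System.out.println(bigEndian + " vs " + littleEndian);
    }
}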
From source file:org.opendaylight.controller.protocol_plugin.openflow.vendorextension.v6extension.V6StatsReply.java
@Override
public void readFrom(ByteBuffer data) {
    short i;
    this.length = data.getShort();
    if (length < MINIMUM_LENGTH)
        return; // TBD - Spurious Packet?
    this.tableId = data.get();
    data.get(); // pad
    this.durationSeconds = data.getInt();
    this.durationNanoseconds = data.getInt();
    this.priority = data.getShort();
    this.idleTimeout = data.getShort();
    this.hardTimeout = data.getShort();
    this.match_len = data.getShort();
    this.idleAge = data.getShort();
    this.hardAge = data.getShort();
    this.cookie = data.getLong();
    this.packetCount = data.getLong();
    this.byteCount = data.getLong();
    if (this.length == MINIMUM_LENGTH) {
        return; // TBD - can this happen??
    }
    if (this.match == null)
        this.match = new V6Match();
    ByteBuffer mbuf = ByteBuffer.allocate(match_len);
    for (i = 0; i < match_len; i++) {
        mbuf.put(data.get());
    }
    mbuf.rewind();
    this.match.readFrom(mbuf);
    if (this.actionFactory == null)
        throw new RuntimeException("OFActionFactory not set");
    /*
     * The action list may be preceded by 0 to 7 bytes of padding, so that it
     * starts on an 8-byte boundary after the match.
     */
    short pad_size = (short) (((match_len + 7) / 8) * 8 - match_len);
    for (i = 0; i < pad_size; i++)
        data.get();
    int action_len = this.length - MINIMUM_LENGTH - (match_len + pad_size);
    if (action_len > 0)
        this.actions = this.actionFactory.parseActions(data, action_len);
}
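The byte-by-byte copy of the match field above can be expressed as a single bulk get(byte[]), which reads the same bytes and advances the source position the same way. A sketch (the method name is ours, not from the source):

import java.nio.ByteBuffer;

// Copies matchLen bytes out of data into a fresh buffer, as the loop above does.
static ByteBuffer sliceMatch(ByteBuffer data, short matchLen) {
    byte[] matchBytes = new byte[matchLen];
    data.get(matchBytes);               // bulk read; advances data's position by matchLen
    return ByteBuffer.wrap(matchBytes); // wrapped buffer starts at position 0
}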
From source file:org.apache.hadoop.hbase.client.coprocessor.AggregationClient.java
/**
 * It gives the row count, by summing up the individual results obtained from
 * regions. In case the qualifier is null, FirstKeyValueFilter is used to
 * optimise the operation. In case a qualifier is provided, the filter cannot
 * be used, as it may set the flag to skip to the next row even though the
 * value read does not match the filter; in that case the row would not be
 * counted, producing an error.
 * @param table
 * @param ci
 * @param scan
 * @return <R, S>
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(final HTable table,
        final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, true);
    class RowNumCallback implements Batch.Callback<Long> {
        private final AtomicLong rowCountL = new AtomicLong(0);

        public long getRowNumCount() {
            return rowCountL.get();
        }

        @Override
        public void update(byte[] region, byte[] row, Long result) {
            rowCountL.addAndGet(result.longValue());
        }
    }
    RowNumCallback rowNum = new RowNumCallback();
    table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<AggregateService, Long>() {
                @Override
                public Long call(AggregateService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<AggregateResponse> rpcCallback = new BlockingRpcCallback<AggregateResponse>();
                    instance.getRowNum(controller, requestArg, rpcCallback);
                    AggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    byte[] bytes = getBytesFromResponse(response.getFirstPart(0));
                    ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);
                    bb.rewind();
                    return bb.getLong();
                }
            }, rowNum);
    return rowNum.getRowNumCount();
}
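The allocate(8).put(bytes) + rewind() + getLong() idiom above (it recurs in the following examples as well) can be collapsed into a single wrap, assuming the response payload carries at least one 8-byte long. A sketch with a hypothetical helper name:

import java.nio.ByteBuffer;

static long decodeLong(byte[] bytes) {
    // Equivalent to ByteBuffer.allocate(8).put(bytes), then rewind(), then getLong(),
    // provided bytes.length >= 8; wrap() starts reading at position 0 directly.
    return ByteBuffer.wrap(bytes).getLong();
}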
From source file:org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient.java
/**
 * It gives the row count, by summing up the individual results obtained from
 * regions. In case the qualifier is null, FirstKeyValueFilter is used to
 * optimise the operation. In case a qualifier is provided, the filter cannot
 * be used, as it may set the flag to skip to the next row even though the
 * value read does not match the filter; in that case the row would not be
 * counted, producing an error.
 * @param table
 * @param ci
 * @param scan
 * @return <R, S>
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(final long transactionId,
        final TransactionalTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
        throws Throwable {
    byte[] currentBeginKey = scan.getStartRow();
    HRegionInfo currentRegion = table.getRegionLocation(currentBeginKey).getRegionInfo();
    com.google.protobuf.ByteString regionName = ByteString.copyFromUtf8(currentRegion.getRegionNameAsString());
    final TransactionalAggregateRequest requestArg = validateArgAndGetPB(regionName, transactionId, scan, ci, true);
    class RowNumCallback implements Batch.Callback<Long> {
        private final AtomicLong rowCountL = new AtomicLong(0);

        public long getRowNumCount() {
            return rowCountL.get();
        }

        @Override
        public void update(byte[] region, byte[] row, Long result) {
            rowCountL.addAndGet(result.longValue());
        }
    }
    RowNumCallback rowNum = new RowNumCallback();
    table.coprocessorService(TrxRegionService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TrxRegionService, Long>() {
                @Override
                public Long call(TrxRegionService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TransactionalAggregateResponse> rpcCallback = new BlockingRpcCallback<TransactionalAggregateResponse>();
                    instance.getRowNum(controller, requestArg, rpcCallback);
                    TransactionalAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    byte[] bytes = getBytesFromResponse(response.getFirstPart(0));
                    ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);
                    bb.rewind();
                    return bb.getLong();
                }
            }, rowNum);
    return rowNum.getRowNumCount();
}
From source file:org.apache.hadoop.hbase.client.coprocessor.AggregationClient.java
/**
 * It computes the average while fetching the sum and row count from all the
 * corresponding regions. The approach is to compute a global sum of the
 * region-level sums and row counts, and then compute the average.
 * @param table
 * @param scan
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(
        final HTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false);
    class AvgCallBack implements Batch.Callback<Pair<S, Long>> {
        S sum = null;
        Long rowCount = 0L;

        public synchronized Pair<S, Long> getAvgArgs() {
            return new Pair<S, Long>(sum, rowCount);
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, Pair<S, Long> result) {
            sum = ci.add(sum, result.getFirst());
            rowCount += result.getSecond();
        }
    }
    AvgCallBack avgCallBack = new AvgCallBack();
    table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<AggregateService, Pair<S, Long>>() {
                @Override
                public Pair<S, Long> call(AggregateService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<AggregateResponse> rpcCallback = new BlockingRpcCallback<AggregateResponse>();
                    instance.getAvg(controller, requestArg, rpcCallback);
                    AggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    Pair<S, Long> pair = new Pair<S, Long>(null, 0L);
                    if (response.getFirstPartCount() == 0) {
                        return pair;
                    }
                    ByteString b = response.getFirstPart(0);
                    T t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    S s = ci.getPromotedValueFromProto(t);
                    pair.setFirst(s);
                    ByteBuffer bb = ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart()));
                    bb.rewind();
                    pair.setSecond(bb.getLong());
                    return pair;
                }
            }, avgCallBack);
    return avgCallBack.getAvgArgs();
}
From source file:org.apache.hadoop.hbase.client.coprocessor.AggregationClient.java
/**
 * It computes a global standard deviation for a given column and its value.
 * Standard deviation is the square root of (average of squares -
 * average*average). From individual regions it obtains the sum, the sum of
 * squares, and the number of rows; with these, the above values are computed
 * to get the global standard deviation.
 * @param table
 * @param scan
 * @return standard deviations
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<List<S>, Long> getStdArgs(
        final HTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false);
    class StdCallback implements Batch.Callback<Pair<List<S>, Long>> {
        long rowCountVal = 0L;
        S sumVal = null, sumSqVal = null;

        public synchronized Pair<List<S>, Long> getStdParams() {
            List<S> l = new ArrayList<S>();
            l.add(sumVal);
            l.add(sumSqVal);
            Pair<List<S>, Long> p = new Pair<List<S>, Long>(l, rowCountVal);
            return p;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, Pair<List<S>, Long> result) {
            if (result.getFirst().size() > 0) {
                sumVal = ci.add(sumVal, result.getFirst().get(0));
                sumSqVal = ci.add(sumSqVal, result.getFirst().get(1));
                rowCountVal += result.getSecond();
            }
        }
    }
    StdCallback stdCallback = new StdCallback();
    table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<AggregateService, Pair<List<S>, Long>>() {
                @Override
                public Pair<List<S>, Long> call(AggregateService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<AggregateResponse> rpcCallback = new BlockingRpcCallback<AggregateResponse>();
                    instance.getStd(controller, requestArg, rpcCallback);
                    AggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    Pair<List<S>, Long> pair = new Pair<List<S>, Long>(new ArrayList<S>(), 0L);
                    if (response.getFirstPartCount() == 0) {
                        return pair;
                    }
                    List<S> list = new ArrayList<S>();
                    for (int i = 0; i < response.getFirstPartCount(); i++) {
                        ByteString b = response.getFirstPart(i);
                        T t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                        S s = ci.getPromotedValueFromProto(t);
                        list.add(s);
                    }
                    pair.setFirst(list);
                    ByteBuffer bb = ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart()));
                    bb.rewind();
                    pair.setSecond(bb.getLong());
                    return pair;
                }
            }, stdCallback);
    return stdCallback.getStdParams();
}
From source file:org.cosmo.common.record.Defn.java
public Object readAll(int maxCounts) throws IOException {
    int count = Math.min(readCount(), maxCounts);
    byte[] buf = readFullRawBytes(maxCounts).array();
    ByteBuffer readDataIO = ByteBuffer.allocate(size());
    Object elements = null;
    if (this instanceof DefnRecord) {
        elements = Array.newInstance(long.class, count);
        for (int i = 0, offset = 0, size = size(), c = count; i < c; i++, offset += size) {
            readDataIO.put(buf, offset, size);
            readDataIO.rewind();
            readDataIO.get(); // skip header byte
            Array.set(elements, i, readDataIO.getLong());
            readDataIO.rewind();
        }
    } else {
        elements = Array.newInstance(field().getType(), count);
        for (int i = 0, offset = 0, size = size(), c = count; i < c; i++, offset += size) {
            readDataIO.put(buf, offset, size);
            readDataIO.rewind();
            readDataIO.get(); // skip header byte
            Array.set(elements, i, readImpl(readDataIO, false));
            readDataIO.rewind();
        }
    }
    return elements;
}
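Since every record here has a fixed size with a one-byte header, the per-record copy into a scratch buffer could also be avoided with the absolute getLong(int index) overload, which reads at a given offset without touching the position. A sketch under those assumptions (names are ours):

import java.nio.ByteBuffer;

static long[] readAllLongs(byte[] buf, int count, int recordSize) {
    ByteBuffer data = ByteBuffer.wrap(buf);
    long[] elements = new long[count];
    for (int i = 0, offset = 0; i < count; i++, offset += recordSize) {
        elements[i] = data.getLong(offset + 1); // absolute read; +1 skips the header byte
    }
    return elements;
}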
From source file:org.voltdb.utils.CatalogUtil.java
/**
 * Retrieve the catalog and deployment configuration from zookeeper.
 * NOTE: In general, people who want the catalog and/or deployment should
 * be getting it from the current CatalogContext, available from
 * VoltDB.instance(). This is primarily for startup and for use by
 * @UpdateApplicationCatalog. If you think this is where you need to
 * be getting catalog or deployment from, consider carefully if that's
 * really what you want to do. --izzy 12/8/2014
 */
public static CatalogAndIds getCatalogFromZK(ZooKeeper zk) throws KeeperException, InterruptedException {
    ByteBuffer versionAndBytes = ByteBuffer.wrap(zk.getData(VoltZK.catalogbytes, false, null));
    int version = versionAndBytes.getInt();
    long catalogTxnId = versionAndBytes.getLong();
    long catalogUniqueId = versionAndBytes.getLong();
    byte[] catalogHash = new byte[20]; // SHA-1 hash size
    versionAndBytes.get(catalogHash);
    byte[] deploymentHash = new byte[20]; // SHA-1 hash size
    versionAndBytes.get(deploymentHash);
    int catalogLength = versionAndBytes.getInt();
    byte[] catalogBytes = new byte[catalogLength];
    versionAndBytes.get(catalogBytes);
    int deploymentLength = versionAndBytes.getInt();
    byte[] deploymentBytes = new byte[deploymentLength];
    versionAndBytes.get(deploymentBytes);
    versionAndBytes = null;
    return new CatalogAndIds(catalogTxnId, catalogUniqueId, version, catalogHash, deploymentHash,
            catalogBytes, deploymentBytes);
}
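For symmetry, a sketch of producing the fixed-width header that the reader above starts with (field order and widths are inferred from the read path; this is not VoltDB's actual writer):

import java.nio.ByteBuffer;

static ByteBuffer writeVersionHeader(int version, long catalogTxnId, long catalogUniqueId) {
    ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES + 2 * Long.BYTES);
    bb.putInt(version);          // read back with getInt()
    bb.putLong(catalogTxnId);    // read back with getLong()
    bb.putLong(catalogUniqueId); // read back with getLong()
    bb.flip();                   // ready for reading from position 0
    return bb;
}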