Usage examples for java.nio.ByteBuffer.position()
public final int position()
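Before the examples below, a minimal, self-contained illustration of the two overloads involved: position() reports the index of the next byte to be read or written, and position(int) moves that index. The class name and values are illustrative only.

import java.nio.ByteBuffer;

public class PositionBasics {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        System.out.println(buf.position()); // 0 - nothing written yet
        buf.putInt(42);                     // relative write advances the position by 4
        System.out.println(buf.position()); // 4
        buf.position(0);                    // absolute positioning rewinds the buffer
        System.out.println(buf.getInt());   // 42
    }
}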
From source file:net.jradius.packet.attribute.AttributeFactory.java
public static RadiusAttribute newAttribute(long vendor, long type, long len, int op, ByteBuffer buffer,
        boolean pool) {
    RadiusAttribute attr = null;
    int valueLength = (int) len;
    try {
        if (vendor > 1 || type == 26) {
            boolean needVendorAndType = (vendor < 1);
            boolean needVendorType = (type < 1);
            if (needVendorAndType) {
                vendor = RadiusFormat.getUnsignedInt(buffer);
            }
            if (needVendorAndType || needVendorType) {
                type = RadiusFormat.getUnsignedByte(buffer);
            }
            Long key = new Long(vendor << 16 | type);
            if (pool) {
                attr = borrow(key);
            }
            if (attr == null) {
                attr = vsa(vendor, type);
            }
            if (needVendorAndType || needVendorType) {
                VSAttribute vsa = (VSAttribute) attr;
                int vsaLength = 0;
                int vsaHeaderLen = 2;
                switch (vsa.getLengthLength()) {
                case 1:
                    vsaLength = RadiusFormat.getUnsignedByte(buffer);
                    break;
                case 2:
                    vsaLength = RadiusFormat.getUnsignedShort(buffer);
                    vsaHeaderLen++;
                    break;
                case 4:
                    vsaLength = (int) RadiusFormat.getUnsignedInt(buffer);
                    vsaHeaderLen += 3;
                    break;
                }
                if (vsa.hasContinuationByte) {
                    vsa.continuation = (short) RadiusFormat.getUnsignedByte(buffer);
                    vsaHeaderLen++;
                }
                valueLength = vsaLength - vsaHeaderLen;
            }
        } else {
            if (pool) {
                attr = borrow(type);
            }
            if (attr == null) {
                attr = attr(type);
            }
        }
        if (valueLength > 0) {
            attr.setValue(buffer.array(), buffer.position(), valueLength);
            buffer.position(buffer.position() + valueLength);
        } else {
            attr.setValue(null, 0, 0);
        }
        if (op > -1) {
            attr.setAttributeOp(op);
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return attr;
}
From source file:com.healthmarketscience.jackcess.Column.java
/**
 * Writes the column definitions into a table definition buffer.
 * @param creator table creator holding the columns to write definitions for
 * @param buffer  buffer to write to
 */
protected static void writeDefinitions(TableCreator creator, ByteBuffer buffer) throws IOException {
    List<Column> columns = creator.getColumns();
    short columnNumber = (short) 0;
    short fixedOffset = (short) 0;
    short variableOffset = (short) 0;
    // we specifically put the "long variable" values after the normal
    // variable length values so that we have a better chance of fitting it
    // all (because "long variable" values can go in separate pages)
    short longVariableOffset = Column.countNonLongVariableLength(columns);
    for (Column col : columns) {
        // record this for later use when writing indexes
        col.setColumnNumber(columnNumber);

        int position = buffer.position();
        buffer.put(col.getType().getValue());
        buffer.putInt(Table.MAGIC_TABLE_NUMBER); // constant magic number
        buffer.putShort(columnNumber); // column number
        if (col.isVariableLength()) {
            if (!col.getType().isLongValue()) {
                buffer.putShort(variableOffset++);
            } else {
                buffer.putShort(longVariableOffset++);
            }
        } else {
            buffer.putShort((short) 0);
        }
        buffer.putShort(columnNumber); // column number again

        if (col.getType().isTextual()) {
            // this will write 4 bytes (note we don't support writing dbs which
            // use the text code page)
            writeSortOrder(buffer, col.getTextSortOrder(), creator.getFormat());
        } else {
            if (col.getType().getHasScalePrecision()) {
                buffer.put(col.getPrecision()); // numeric precision
                buffer.put(col.getScale()); // numeric scale
            } else {
                buffer.put((byte) 0x00); // unused
                buffer.put((byte) 0x00); // unused
            }
            buffer.putShort((short) 0); // unknown
        }
        buffer.put(col.getColumnBitFlags()); // misc col flags
        if (col.isCompressedUnicode()) { // compressed
            buffer.put((byte) 1);
        } else {
            buffer.put((byte) 0);
        }
        buffer.putInt(0); // unknown, but always 0

        // offset for fixed length columns
        if (col.isVariableLength()) {
            buffer.putShort((short) 0);
        } else {
            buffer.putShort(fixedOffset);
            fixedOffset += col.getType().getFixedSize(col.getLength());
        }

        if (!col.getType().isLongValue()) {
            buffer.putShort(col.getLength()); // column length
        } else {
            buffer.putShort((short) 0x0000); // unused
        }
        columnNumber++;

        if (LOG.isDebugEnabled()) {
            LOG.debug("Creating new column def block\n" + ByteUtil.toHexString(buffer, position,
                    creator.getFormat().SIZE_COLUMN_DEF_BLOCK));
        }
    }
    for (Column col : columns) {
        Table.writeName(buffer, col.getName(), creator.getCharset());
    }
}
From source file:byps.http.HWireClient.java
protected RequestToCancel createRequestForMessage(BMessage msg, BAsyncResult<BMessage> asyncResult,
        int timeoutSecondsRequest) {
    if (log.isDebugEnabled())
        log.debug("createRequestForMessage(" + msg);

    ByteBuffer requestDataBuffer = msg.buf;

    if (log.isDebugEnabled()) {
        requestDataBuffer.mark();
        BBufferJson bbuf = new BBufferJson(requestDataBuffer);
        log.debug(bbuf.toDetailString());
        requestDataBuffer.reset();
    }

    final RequestToCancel requestToCancel = new RequestToCancel(msg.header.messageId, 0L, 0L, asyncResult);

    final boolean isNegotiate = BNegotiate.isNegotiateMessage(requestDataBuffer);
    final boolean isJson = isNegotiate
            || BMessageHeader.detectProtocol(requestDataBuffer) == BMessageHeader.MAGIC_JSON;
    if (log.isDebugEnabled())
        log.debug("isJson=" + isJson);

    try {
        StringBuilder destUrl = null;

        // Negotiate?
        if (isNegotiate) {
            // Send a GET request and pass the negotiate string as parameter
            String negoStr = new String(requestDataBuffer.array(), requestDataBuffer.position(),
                    requestDataBuffer.limit(), "UTF-8");
            negoStr = URLEncoder.encode(negoStr, "UTF-8");

            String negoServlet = getServletPathForNegotiationAndAuthentication();
            destUrl = getUrlStringBuilder(negoServlet);
            destUrl.append("&negotiate=").append(negoStr);

            // Clear session Cookie
            httpClient.clearHttpSession();
        }
        // Reverse request (long-poll)?
        else if ((msg.header.flags & BMessageHeader.FLAG_RESPONSE) != 0) {
            String longpollServlet = getServletPathForReverseRequest();
            destUrl = getUrlStringBuilder(longpollServlet);
            timeoutSecondsRequest = 0; // timeout controlled by server, 10min by default
        }
        // Ordinary request
        else {
            destUrl = getUrlStringBuilder("");
        }

        if (log.isDebugEnabled())
            log.debug("open connection, url=" + destUrl);

        final HHttpRequest httpRequest = isNegotiate ? httpClient.get(destUrl.toString(), requestToCancel)
                : httpClient.post(destUrl.toString(), requestDataBuffer, requestToCancel);

        httpRequest.setTimeouts(timeoutSecondsClient, timeoutSecondsRequest);

        requestToCancel.setHttpRequest(httpRequest);
        addRequest(requestToCancel);

    } catch (Throwable e) {
        if (log.isDebugEnabled())
            log.debug("received Throwable: " + e);
        BException bex = new BException(BExceptionC.IOERROR, "IO error", e);
        asyncResult.setAsyncResult(null, bex);
    }

    if (log.isDebugEnabled())
        log.debug(")createRequestForMessage=" + requestToCancel);
    return requestToCancel;
}
From source file:com.robonobo.eon.SEONConnection.java
/**
 * Blocks until there is data to read.
 *
 * @param buf buffer that receives the incoming data
 */
public void read(ByteBuffer buf) throws EONException {
    receiveLock.lock();
    try {
        while (true) {
            while (incomingDataBufs.size() == 0) {
                if (state == State.Closed)
                    return;
                try {
                    haveData.await();
                } catch (InterruptedException e) {
                    throw new EONException(e);
                }
            }
            ByteBuffer incoming = (ByteBuffer) incomingDataBufs.getFirst();
            if (buf.remaining() >= incoming.remaining())
                buf.put(incoming);
            else {
                int remain = buf.remaining();
                buf.put(incoming.array(), incoming.position(), remain);
                incoming.position(incoming.position() + remain);
            }
            if (incoming.remaining() == 0)
                incomingDataBufs.removeFirst();
            if (buf.remaining() == 0)
                return;
            if (incomingDataBufs.size() == 0)
                return;
        }
    } finally {
        receiveLock.unlock();
    }
}
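Both the AttributeFactory and SEONConnection examples above use the same idiom: copy bytes out of the backing array starting at position(), then call position(position() + n) explicitly, because array-based copies do not advance the buffer. A minimal sketch of that idiom follows; it assumes a heap buffer whose array() is accessible, and the class and method names are hypothetical.

import java.nio.ByteBuffer;

final class BufferCopy {
    /** Copies n bytes from src's backing array into dst and advances src manually. */
    static void copyAndAdvance(ByteBuffer src, byte[] dst, int dstOffset, int n) {
        System.arraycopy(src.array(), src.position(), dst, dstOffset, n);
        // array() access does not move the buffer, so the position must be advanced by hand
        src.position(src.position() + n);
    }
}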
From source file:org.openpilot_nonag.uavtalk.UAVTalk.java
/**
 * Send an object through the telemetry link.
 * @throws IOException
 * @param[in] obj Object handle to send
 * @param[in] type Transaction type
 * \return Success (true), Failure (false)
 */
private boolean transmitSingleObject(int type, long objId, long instId, UAVObject obj) throws IOException {
    int length = 0;

    assert (objMngr != null && outStream != null);

    // IMPORTANT : obj can be null (when type is NACK for example)

    // Determine data length
    if (type == TYPE_OBJ_REQ || type == TYPE_ACK || type == TYPE_NACK) {
        length = 0;
    } else {
        length = obj.getNumBytes();
    }

    ByteBuffer bbuf = ByteBuffer.allocate(MAX_PACKET_LENGTH);
    bbuf.order(ByteOrder.LITTLE_ENDIAN);

    // Setup type and object id fields
    bbuf.put((byte) (SYNC_VAL & 0xff));
    bbuf.put((byte) (type & 0xff));
    bbuf.putShort((short) (length + HEADER_LENGTH));
    bbuf.putInt((int) objId);
    bbuf.putShort((short) (instId & 0xffff));

    // Check length
    if (length >= MAX_PAYLOAD_LENGTH) {
        ++stats.txErrors;
        return false;
    }

    // Copy data (if any)
    if (length > 0)
        try {
            if (obj.pack(bbuf) == 0) {
                ++stats.txErrors;
                return false;
            }
        } catch (Exception e) {
            ++stats.txErrors;
            e.printStackTrace();
            return false;
        }

    // Calculate checksum
    bbuf.put((byte) (updateCRC(0, bbuf.array(), bbuf.position()) & 0xff));

    int packlen = bbuf.position();
    bbuf.position(0);
    byte[] dst = new byte[packlen];
    bbuf.get(dst, 0, packlen);
    outStream.write(dst);

    // Update stats
    ++stats.txObjects;
    stats.txBytes += bbuf.position();
    stats.txObjectBytes += length;

    // Done
    return true;
}
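The UAVTalk example shows another common use of position(): after a run of relative puts, position() equals the number of bytes written, so it doubles as the packet length (here fed to the CRC) before the buffer is rewound with position(0) and drained. A stripped-down sketch of that write-measure-rewind sequence follows; the class name and field values are made up for illustration.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

final class PacketBuild {
    static byte[] buildPacket() {
        ByteBuffer bbuf = ByteBuffer.allocate(64);
        bbuf.order(ByteOrder.LITTLE_ENDIAN);
        bbuf.put((byte) 0x3C);         // sync byte (illustrative value)
        bbuf.putShort((short) 8);      // length field (illustrative value)
        bbuf.putInt(0x12345678);       // object id (illustrative value)
        int packlen = bbuf.position(); // bytes written so far
        bbuf.position(0);              // rewind before draining
        byte[] dst = new byte[packlen];
        bbuf.get(dst, 0, packlen);
        return dst;
    }
}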
From source file:com.mellanox.r4h.DFSInputStream.java
private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode, final LocatedBlock block,
        final long start, final long end, final ByteBuffer bb,
        final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap, final int hedgedReadId) {
    final Span parentSpan = Trace.currentSpan();
    return new Callable<ByteBuffer>() {
        @Override
        public ByteBuffer call() throws Exception {
            byte[] buf = bb.array();
            int offset = bb.position();
            TraceScope scope = Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
            try {
                actualGetFromOneDataNode(datanode, block, start, end, buf, offset, corruptedBlockMap);
                return bb;
            } finally {
                scope.close();
            }
        }
    };
}
From source file:com.mellanox.r4h.DFSInputStream.java
/**
 * Like {@link #fetchBlockByteRange(LocatedBlock, long, long, byte[], int, Map)} except we start up a
 * second, parallel, 'hedged' read if the first read is taking longer than the configured amount of
 * time. We then wait on whichever read returns first.
 */
private void hedgedFetchBlockByteRange(LocatedBlock block, long start, long end, byte[] buf, int offset,
        Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap) throws IOException {
    ArrayList<Future<ByteBuffer>> futures = new ArrayList<Future<ByteBuffer>>();
    CompletionService<ByteBuffer> hedgedService = new ExecutorCompletionService<ByteBuffer>(
            dfsClient.getHedgedReadsThreadPool());
    ArrayList<DatanodeInfo> ignored = new ArrayList<DatanodeInfo>();
    ByteBuffer bb = null;
    int len = (int) (end - start + 1);
    int hedgedReadId = 0;
    block = getBlockAt(block.getStartOffset());
    while (true) {
        // see HDFS-6591, this metric is used to verify/catch unnecessary loops
        hedgedReadOpsLoopNumForTesting++;
        DNAddrPair chosenNode = null;
        // there is no request already executing.
        if (futures.isEmpty()) {
            // chooseDataNode is a commitment. If no node, we go to
            // the NN to reget block locations. Only go here on first read.
            chosenNode = chooseDataNode(block, ignored);
            bb = ByteBuffer.wrap(buf, offset, len);
            Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end,
                    bb, corruptedBlockMap, hedgedReadId++);
            Future<ByteBuffer> firstRequest = hedgedService.submit(getFromDataNodeCallable);
            futures.add(firstRequest);
            try {
                Future<ByteBuffer> future = hedgedService.poll(dfsClient.getHedgedReadTimeout(),
                        TimeUnit.MILLISECONDS);
                if (future != null) {
                    future.get();
                    return;
                }
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Waited " + dfsClient.getHedgedReadTimeout() + "ms to read from "
                            + chosenNode.info + "; spawning hedged read");
                }
                // Ignore this node on next go around.
                ignored.add(chosenNode.info);
                dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                continue; // no need to refresh block locations
            } catch (InterruptedException e) {
                // Ignore
            } catch (ExecutionException e) {
                // Ignore already logged in the call.
            }
        } else {
            // We are starting up a 'hedged' read. We have a read already
            // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.
            // If no nodes to do hedged reads against, pass.
            try {
                try {
                    chosenNode = getBestNodeDNAddrPair(block, ignored);
                } catch (IOException ioe) {
                    chosenNode = chooseDataNode(block, ignored);
                }
                bb = ByteBuffer.allocate(len);
                Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start,
                        end, bb, corruptedBlockMap, hedgedReadId++);
                Future<ByteBuffer> oneMoreRequest = hedgedService.submit(getFromDataNodeCallable);
                futures.add(oneMoreRequest);
            } catch (IOException ioe) {
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Failed getting node for hedged read: " + ioe.getMessage());
                }
            }
            // if not succeeded. Submit callables for each datanode in a loop, wait
            // for a fixed interval and get the result from the fastest one.
            try {
                ByteBuffer result = getFirstToComplete(hedgedService, futures);
                // cancel the rest.
                cancelAll(futures);
                if (result.array() != buf) { // compare the array pointers
                    dfsClient.getHedgedReadMetrics().incHedgedReadWins();
                    System.arraycopy(result.array(), result.position(), buf, offset, len);
                } else {
                    dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                }
                return;
            } catch (InterruptedException ie) {
                // Ignore and retry
            }
            // We got here if exception. Ignore this node on next go around IFF
            // we found a chosenNode to hedge read against.
            if (chosenNode != null && chosenNode.info != null) {
                ignored.add(chosenNode.info);
            }
        }
    }
}
From source file:com.healthmarketscience.jackcess.Table.java
/**
 * Reads the column data from the given row buffer. Leaves limit unchanged.
 * Caches the returned value in the rowState.
 */
private static Object getRowColumn(JetFormat format, ByteBuffer rowBuffer, Column column, RowState rowState,
        Map<Column, byte[]> rawVarValues) throws IOException {
    byte[] columnData = null;
    try {
        NullMask nullMask = rowState.getNullMask(rowBuffer);
        boolean isNull = nullMask.isNull(column);
        if (column.getType() == DataType.BOOLEAN) {
            // Boolean values are stored in the null mask. see note about
            // caching below
            return rowState.setRowValue(column.getColumnIndex(), Boolean.valueOf(!isNull));
        } else if (isNull) {
            // well, that's easy! (no need to update cache w/ null)
            return null;
        }

        // reset position to row start
        rowBuffer.reset();

        // locate the column data bytes
        int rowStart = rowBuffer.position();
        int colDataPos = 0;
        int colDataLen = 0;

        if (!column.isVariableLength()) {
            // read fixed length value (non-boolean at this point)
            int dataStart = rowStart + format.OFFSET_COLUMN_FIXED_DATA_ROW_OFFSET;
            colDataPos = dataStart + column.getFixedDataOffset();
            colDataLen = column.getType().getFixedSize(column.getLength());
        } else {
            int varDataStart;
            int varDataEnd;

            if (format.SIZE_ROW_VAR_COL_OFFSET == 2) {
                // read simple var length value
                int varColumnOffsetPos = (rowBuffer.limit() - nullMask.byteSize() - 4)
                        - (column.getVarLenTableIndex() * 2);
                varDataStart = rowBuffer.getShort(varColumnOffsetPos);
                varDataEnd = rowBuffer.getShort(varColumnOffsetPos - 2);
            } else {
                // read jump-table based var length values
                short[] varColumnOffsets = readJumpTableVarColOffsets(rowState, rowBuffer, rowStart, nullMask);
                varDataStart = varColumnOffsets[column.getVarLenTableIndex()];
                varDataEnd = varColumnOffsets[column.getVarLenTableIndex() + 1];
            }

            colDataPos = rowStart + varDataStart;
            colDataLen = varDataEnd - varDataStart;
        }

        // grab the column data
        columnData = new byte[colDataLen];
        rowBuffer.position(colDataPos);
        rowBuffer.get(columnData);

        if ((rawVarValues != null) && column.isVariableLength()) {
            // caller wants raw value as well
            rawVarValues.put(column, columnData);
        }

        // parse the column data. we cache the row values in order to be able
        // to update the index on row deletion. note, most of the returned
        // values are immutable, except for binary data (returned as byte[]),
        // but binary data shouldn't be indexed anyway.
        return rowState.setRowValue(column.getColumnIndex(), column.read(columnData));
    } catch (Exception e) {
        // cache "raw" row value. see note about caching above
        rowState.setRowValue(column.getColumnIndex(), Column.rawDataWrapper(columnData));
        return rowState.handleRowError(column, columnData, e);
    }
}
From source file:com.koda.integ.hbase.storage.FileExtStorage.java
@Override
public StorageHandle storeData(ByteBuffer buf) {
    writeLock.writeLock().lock();
    int pos = 0;
    try {
        if (activeBuffer.get() == null) {
            // unlock
            writeLock.writeLock().unlock();
            // Get next buffer from empty queue - blocking call
            ByteBuffer bbuf = emptyBuffersQueue.take();
            // lock again
            writeLock.writeLock().lock();
            if (activeBuffer.get() == null) {
                activeBuffer.set(bbuf);
                bufferOffset.set(0);
            } else {
                // somebody already set the activeBuffer
                // repeat call recursively
                emptyBuffersQueue.offer(bbuf);
                writeLock.writeLock().unlock();
                return storeData(buf);
            }
        }

        pos = buf.position();

        long currentFileLength = currentFileOffsetForWrites.get();
        if (bufferOffset.get() == 0 && currentFileLength + bufferSize > fileSizeLimit) {
            // previous buffer was flushed
            currentFileOffsetForWrites.set(0);
            maxIdForWrites.incrementAndGet();
        }

        int size = buf.getInt();
        long off = bufferOffset.getAndAdd(size + 4);

        if (off + size + 4 > bufferSize) {
            // send current buffer to write queue
            ByteBuffer buff = activeBuffer.get();
            //verifyBuffer(buff);
            writeQueue.offer(buff);
            activeBuffer.set(null);
            if (currentFileLength + bufferSize > fileSizeLimit) {
                currentFileOffsetForWrites.set(0);
                maxIdForWrites.incrementAndGet();
            }
            // release lock
            writeLock.writeLock().unlock();
            // Get next buffer from empty queue
            ByteBuffer bbuf = emptyBuffersQueue.take();
            // lock again
            writeLock.writeLock().lock();
            if (activeBuffer.get() == null) {
                activeBuffer.set(bbuf);
            } else {
                // some other thread set already the activeBuffer
                // repeat call recursively
                emptyBuffersQueue.offer(bbuf);
                writeLock.writeLock().unlock();
                buf.position(pos);
                return storeData(buf);
            }
            bufferOffset.set(size + 4);
            // Check if need advance file
        }

        // We need to keep overall object (key+block) size in a file
        buf.position(pos);
        buf.limit(pos + size + 4);

        activeBuffer.get().put(buf);

        FileStorageHandle fsh = new FileStorageHandle(maxIdForWrites.get(),
                (int) (currentFileOffsetForWrites.get()), size);
        // Increase offset in current file for writes;
        currentFileOffsetForWrites.addAndGet(size + 4);
        return fsh;
    } catch (InterruptedException e) {
        e.printStackTrace();
        writeLock.writeLock().unlock();
        buf.position(pos);
        return storeData(buf);
    } catch (FileNotFoundException e) {
        LOG.fatal("File not found.", e);
    } catch (IOException e) {
        LOG.fatal("I/O error.", e);
    } finally {
        WriteLock lock = writeLock.writeLock();
        if (lock.isHeldByCurrentThread()) {
            lock.unlock();
        }
    }
}
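FileExtStorage uses position() defensively: the caller's position is saved in pos before the size prefix is consumed with getInt(), so buf.position(pos) can restore the buffer whenever the write has to be retried. A minimal sketch of that save-and-restore pattern follows; the class name and the tryConsume helper are hypothetical.

import java.nio.ByteBuffer;

final class RetryRead {
    /** Reads a length-prefixed record; on failure the buffer is restored so the caller can retry. */
    static boolean tryConsume(ByteBuffer buf, byte[] out) {
        int pos = buf.position();      // remember where the record starts
        int size = buf.getInt();       // consumes the 4-byte size prefix
        if (size < 0 || size > out.length || buf.remaining() < size) {
            buf.position(pos);         // roll back to the saved position
            return false;
        }
        buf.get(out, 0, size);
        return true;
    }
}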
From source file:libepg.ts.reader.Reader2.java
/**
 * Reads TS packets from the file: scans byte by byte until a sync byte is found,
 * then reads one full packet's worth of data, drops packets whose transport error
 * indicator is set, and stops at EOF or when the configured read limit is reached.
 *
 * @return the list of packets that were read
 */
public synchronized List<TsPacket> getPackets() {
    ByteBuffer packetBuffer = ByteBuffer.allocate(TsPacket.TS_PACKET_BYTE_LENGTH.PACKET_LENGTH.getByteLength());
    byte[] byteData = new byte[1]; // single-byte read buffer
    List<TsPacket> packets = new ArrayList<>();
    FileInputStream fis = null;
    PushbackInputStream pis = null;
    try {
        fis = new FileInputStream(this.TSFile);
        pis = new PushbackInputStream(fis);
        boolean tipOfPacket = false; // true while positioned at the start of a packet
        long count = 0;
        // read one byte at a time until EOF
        while (pis.read(byteData) != EOF) {
            // a sync byte seen outside of a packet marks the start of the next packet
            if ((byteData[0] == TsPacket.TS_SYNC_BYTE) && (tipOfPacket == false)) {
                tipOfPacket = true;
                if (LOG.isTraceEnabled() && NOT_DETERRENT_READ_TRACE_LOG) {
                    LOG.trace("Found sync byte - start of a packet.");
                }
                pis.unread(byteData);
            }
            if (tipOfPacket == true) {
                byte[] tsPacketData = new byte[TsPacket.TS_PACKET_BYTE_LENGTH.PACKET_LENGTH.getByteLength()];
                if (pis.read(tsPacketData) != EOF) {
                    if (LOG.isTraceEnabled() && NOT_DETERRENT_READ_TRACE_LOG) {
                        LOG.trace("Read one packet's worth of data.");
                    }
                    packetBuffer.put(tsPacketData);
                } else {
                    break;
                }
            }
            if (packetBuffer.remaining() == 0) {
                byte[] BeforeCutDown = packetBuffer.array();
                byte[] AfterCutDown = new byte[packetBuffer.position()];
                // trim the backing array to the bytes actually written
                System.arraycopy(BeforeCutDown, 0, AfterCutDown, 0, AfterCutDown.length);
                TsPacket tsp = new TsPacket(AfterCutDown);
                // LOG.debug(Hex.encodeHexString(tsp.getData()));
                if (LOG.isTraceEnabled() && NOT_DETERRENT_READ_TRACE_LOG) {
                    LOG.trace("Constructed one packet.");
                    LOG.trace(tsp.toString());
                }
                if (tsp.getTransport_error_indicator() != 0) {
                    if (LOG.isWarnEnabled()) {
                        LOG.warn("Transport error indicator is set - dropping packet.");
                        LOG.warn(tsp);
                        LOG.warn(TSFile);
                    }
                    tipOfPacket = false;
                } else {
                    packets.add(tsp);
                    count++;
                }
                packetBuffer.clear();
                tipOfPacket = false;
                if (this.readLimit != null && count >= this.readLimit) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info("Read limit reached. limit = " + this.readLimit);
                    }
                    break;
                }
            }
        }
        if (LOG.isTraceEnabled() && NOT_DETERRENT_READ_TRACE_LOG) {
            LOG.trace("Remaining buffer contents:");
            LOG.trace(" = " + Hex.encodeHexString(packetBuffer.array()));
        }
        pis.close();
        fis.close();
        LOG.info("Packets read = " + count);
    } catch (FileNotFoundException e) {
        LOG.fatal("File not found.", e);
    } catch (IOException e) {
        LOG.fatal("I/O error.", e);
    }
    return Collections.unmodifiableList(packets);
}