List of usage examples for java.nio.ByteBuffer.flip()
public final Buffer flip()

Flips this buffer: the limit is set to the current position, the position is set to zero, and the mark, if defined, is discarded. (Since Java 9, ByteBuffer overrides flip() with a covariant return type, public final ByteBuffer flip().)
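In the common fill-then-drain cycle, flip() is called after writing into a buffer and before reading it back out. A minimal, self-contained sketch of that cycle (illustrative only, not drawn from the sources below):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class FlipDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(64);
        buffer.put("hello".getBytes(StandardCharsets.UTF_8)); // fill: position advances to 5
        buffer.flip();                                        // limit = 5, position = 0
        byte[] out = new byte[buffer.remaining()];            // remaining() == 5
        buffer.get(out);                                      // drain exactly what was written
        System.out.println(new String(out, StandardCharsets.UTF_8)); // prints "hello"
    }
}

Without the flip(), the get() would read from position 5 toward the limit of 64, returning bytes that were never written.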
From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java
private void commitBucketTableToDisk() throws BucketTableManagerException {
    File currentFile = null;
    FileChannel fileChannel = null;
    ByteBuffer headerBuffer = null;
    try {
        logger.warn("Start commit bucket table...");
        if (bucketTable.getRequestedCheckPoint() == null || bucketTable.getRequestedCheckPoint().isEmpty())
            throw new BucketTableManagerException("commit requested while there is no requested checkpoint");
        currentFile = getLatestCommitedFile();
        File nextFile = getNextFile(getLatestCommitedFile());
        fileChannel = (new RandomAccessFile(nextFile, "rw")).getChannel();
        // Write header with empty checkpoint
        headerBuffer = ByteBuffer.allocate(HEADERSIZE);
        fileChannel.position(0L);
        headerBuffer.putInt(MAGICSTART);
        headerBuffer.putLong(mapSize);
        // Reset checkpoint to no checkpoint done
        NeedlePointer lastCheckPoint = new NeedlePointer(); // Empty needle
        lastCheckPoint.putNeedlePointerToBuffer(headerBuffer);
        headerBuffer.putInt(MAGICEND);
        headerBuffer.flip(); // limit = bytes written, position = 0: ready to write to the channel
        fileChannel.write(headerBuffer);
        // Now write the buckets
        for (int i = 0; i < nbBuffers; i++) {
            bucketTable.prepareBufferForWriting(i);
            int written = fileChannel.write(bucketTable.getBuffer(i));
            if (written < bucketTable.getBuffer(i).limit())
                throw new BucketTableManagerException("Incomplete write for bucket table file "
                        + nextFile.getName() + ", expected " + (mapSize + HEADERSIZE));
            try {
                Thread.sleep(10);
            } catch (Throwable th) {
            }
        }
        // Write second magic number
        ByteBuffer buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
        buffer.rewind();
        buffer.limit(INTSIZE);
        buffer.putInt(MAGICSTART);
        buffer.rewind();
        fileChannel.write(buffer);
        // Write needle log info
        Iterator<NeedleLogInfo> it = logInfoPerLogNumber.values().iterator();
        while (it.hasNext()) {
            buffer.rewind();
            buffer.limit(NeedleLogInfo.INFOSIZE);
            NeedleLogInfo nli = it.next();
            nli.putNeedleLogInfo(buffer, true);
            int written = fileChannel.write(buffer);
            if (written < NeedleLogInfo.INFOSIZE)
                throw new BucketTableManagerException(
                        "Incomplete write for bucket table file, writing log infos " + nextFile.getName());
        }
        // Write checkpoint
        headerBuffer = ByteBuffer.allocate(NeedlePointer.POINTERSIZE);
        headerBuffer.rewind();
        headerBuffer.limit(NeedlePointer.POINTERSIZE);
        // The "true" flag flips the buffer after the write
        bucketTable.getRequestedCheckPoint().putNeedlePointerToBuffer(headerBuffer, true);
        headerBuffer.rewind();
        if (fileChannel.write(headerBuffer, CHECKPOINTOFFSET) < NeedlePointer.POINTERSIZE) {
            throw new BucketTableManagerException("Could not write checkpoint to " + nextFile.getName());
        }
        fileChannel.force(true);
        fileChannel.close();
        if (!nextFile.renameTo(getCommittedFile(nextFile)))
            throw new BucketTableManagerException(
                    "Could not rename " + nextFile.getName() + " to " + getCommittedFile(nextFile).getName());
        logger.warn("Committed bucket table.");
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed writing bucket table", ie);
    } finally {
        headerBuffer = null; // may ease garbage collection
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (Exception ex) {
                throw new BucketTableManagerException("Failed to close file channel", ex);
            }
        }
    }
    try {
        if (currentFile != null) {
            if (!currentFile.delete())
                logger.error("Failed deleting previous bucket table " + currentFile.getName());
        }
    } catch (Throwable th) {
        logger.error("Failed deleting previous bucket table " + currentFile.getName(), th);
    }
}
From source file:bamboo.openhash.fileshare.FileShare.java
public void write() {
    logger.debug("write");
    if (is != null) {
        while (ready.size() < MAX_BUFFER) {
            ByteBuffer bb = ByteBuffer.wrap(new byte[1024]);
            bb.putInt(0);
            int len = 0;
            try {
                len = is.read(bb.array(), 4, bb.limit() - 4);
            } catch (IOException e) {
                is = null;
                break;
            }
            if (len == -1) {
                is = null;
                break;
            }
            logger.debug("position=" + bb.position() + " read " + len + " bytes");
            // We're going to flip this later, so set the position
            // where we want the limit to end up.
            bb.position(len + 4);
            wblocks.elementAt(0).addLast(bb);
            logger.debug("read a block");
            if (wblocks.elementAt(0).size() == BRANCHING)
                make_parents(false);
        }
        if (is == null) {
            make_parents(true);
            // There should now be only one non-empty level, and it
            // should have exactly one block in it.
            for (int l = 0; l < wblocks.size(); ++l) {
                if (!wblocks.elementAt(l).isEmpty()) {
                    ByteBuffer bb = wblocks.elementAt(l).removeFirst();
                    bb.flip();
                    md.update(secret);
                    md.update(bb.array(), 0, bb.limit());
                    byte[] dig = md.digest();
                    StringBuffer sb = new StringBuffer(100);
                    bytes_to_sbuf(dig, 0, dig.length, false, sb);
                    logger.info("root digest is 0x" + sb.toString());
                    ready.addLast(new Pair<byte[], ByteBuffer>(dig, bb));
                    break;
                }
            }
        }
    }

    // Do put.
    if (ready.isEmpty()) {
        if (outstanding == 0) {
            logger.info("all puts finished successfully");
            System.exit(0);
        }
    } else {
        Pair<byte[], ByteBuffer> head = ready.removeFirst();
        outstanding++;
        bamboo_put_args put = new bamboo_put_args();
        put.application = APPLICATION;
        // GatewayClient will fill in put.client_library
        put.value = new bamboo_value();
        if (head.second.limit() == head.second.array().length)
            put.value.value = head.second.array();
        else {
            put.value.value = new byte[head.second.limit()];
            head.second.get(put.value.value);
        }
        put.key = new bamboo_key();
        put.key.value = head.first;
        put.ttl_sec = 3600; // TODO
        StringBuffer sb = new StringBuffer(100);
        bytes_to_sbuf(head.first, 0, head.first.length, false, sb);
        logger.debug("putting block size=" + put.value.value.length + " key=0x" + sb.toString());
        client.put(put, curry(put_done_cb, put));
    }
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java
/**
 * Store external with codec.
 * Format:
 * 0..3    - total record size (-4)
 * 4..7    - size of a key in bytes (16 if using hash128)
 * 8..x    - key data
 * x+1     - IN_MEMORY flag (1 - in memory, 0 - not)
 * x+2 ... - block, serialized and compressed
 *
 * @param blockName the block name
 * @param buf the buf
 * @param inMemory the in-memory flag
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void storeExternalWithCodec(String blockName, Cacheable buf, boolean inMemory) throws IOException {
    // If external storage is disabled - bail out
    if (!overflowExtEnabled) {
        return;
    }
    byte[] hashed = Utils.hash128(blockName);
    ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();
    deserializer.set(buf.getDeserializer());
    SerDe serde = extStorageCache.getSerDe();
    Codec codec = extStorageCache.getCompressionCodec();
    buffer.clear();
    buffer.position(4); // reserve 4 bytes for the total record size, written last
    // Save key
    buffer.putInt(hashed.length);
    buffer.put(hashed);
    buffer.put(inMemory ? (byte) 1 : (byte) 0);
    if (buf != null) {
        serde.writeCompressed(buffer, buf, codec);
        int pos = buffer.position();
        buffer.putInt(0, pos - 4); // backpatch the record size at offset 0
    }
    buffer.flip();
    StorageHandle handle = storage.storeData(buffer);
    try {
        // We use the byte array as a key
        extStorageCache.put(hashed, handle.toBytes());
    } catch (Exception e) {
        throw new IOException(e);
    }
}
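The example above reserves the first four bytes of the record for its total size and backpatches that size with an absolute putInt(0, ...) once the payload has been written; only then is the buffer flipped for the storage call. A minimal, self-contained sketch of this length-prefix pattern (the class and method names are hypothetical, not part of the HBase code):

import java.nio.ByteBuffer;

public final class LengthPrefixedRecord {
    // Encode payload as: bytes 0..3 = payload size, bytes 4.. = payload.
    public static ByteBuffer encode(byte[] payload) {
        ByteBuffer buffer = ByteBuffer.allocate(4 + payload.length);
        buffer.position(4);                      // skip the size slot for now
        buffer.put(payload);                     // relative put: advances the position
        buffer.putInt(0, buffer.position() - 4); // absolute put: position is unchanged
        buffer.flip();                           // limit = end of record, position = 0
        return buffer;
    }

    public static void main(String[] args) {
        ByteBuffer record = encode(new byte[] { 1, 2, 3 });
        System.out.println(record.getInt()); // prints 3, the payload size
    }
}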
From source file:edu.hawaii.soest.kilonalu.adcp.ADCPSource.java
/**
 * A method that executes the streaming of data from the source to the RBNB
 * server after all configuration of settings, connections to hosts, and
 * thread initializing occurs. This method contains the detailed code for
 * streaming the data and interpreting the stream.
 */
protected boolean execute() {
    // do not execute the stream if there is no connection
    if (!isConnected())
        return false;
    boolean failed = false;
    SocketChannel socket = getSocketConnection();

    // while data are being sent, read them into the buffer
    try {
        // create four byte placeholders used to evaluate up to a four-byte
        // window. The FIFO layout looks like:
        //         -------------------------
        // in ---> | One | Two |Three|Four | ---> out
        //         -------------------------
        byte byteOne = 0x00, // set initial placeholder values
             byteTwo = 0x00,
             byteThree = 0x00,
             byteFour = 0x00;

        // Create a buffer that will store the ensemble bytes as they are read
        ByteBuffer ensembleBuffer = ByteBuffer.allocate(getBufferSize());

        // create a byte buffer to store bytes from the TCP stream
        ByteBuffer buffer = ByteBuffer.allocateDirect(getBufferSize());

        // add a channel of data that will be pushed to the server.
        // Each ensemble will be sent to the Data Turbine as an rbnb frame.
        ChannelMap rbnbChannelMap = new ChannelMap();
        int channelIndex = rbnbChannelMap.Add(getRBNBChannelName());

        // while there are bytes to read from the socket ...
        while (socket.read(buffer) != -1 || buffer.position() > 0) {
            // prepare the buffer for reading
            buffer.flip();

            // while there are unread bytes in the ByteBuffer
            while (buffer.hasRemaining()) {
                byteOne = buffer.get();

                // Use a State Machine to process the byte stream.
                // Start building an rbnb frame for the entire ensemble, first by
                // inserting a timestamp into the channelMap. This time is merely
                // the time of insert into the data turbine, not the time of
                // observations of the measurements. That time should be parsed out
                // of the ensemble in the Sink client code
                System.out.print("\rProcessed byte # " + ensembleByteCount + " "
                        + new String(Hex.encodeHex((new byte[] { byteOne }))) + " - log msg is: ");

                switch (state) {

                case 0: // find ensemble header id
                    if (byteOne == 0x7F && byteTwo == 0x7F) {
                        ensembleByteCount++; // add Header ID
                        ensembleChecksum += (byteTwo & 0xFF);
                        ensembleByteCount++; // add Data Source ID
                        ensembleChecksum += (byteOne & 0xFF);
                        state = 1;
                        break;
                    } else {
                        break;
                    }

                case 1: // find the Ensemble Length (LSB)
                    ensembleByteCount++; // add Ensemble Byte Count (LSB)
                    ensembleChecksum += (byteOne & 0xFF);
                    state = 2;
                    break;

                case 2: // find the Ensemble Length (MSB)
                    ensembleByteCount++; // add Ensemble Byte Count (MSB)
                    ensembleChecksum += (byteOne & 0xFF);
                    int upperEnsembleByte = (byteOne & 0xFF) << 8;
                    int lowerEnsembleByte = (byteTwo & 0xFF);
                    ensembleBytes = upperEnsembleByte + lowerEnsembleByte;
                    logger.debug("Number of Bytes in the Ensemble: " + ensembleBytes);

                    if (ensembleBuffer.remaining() > 0) {
                        ensembleBuffer.put(byteFour);
                        ensembleBuffer.put(byteThree);
                        ensembleBuffer.put(byteTwo);
                        ensembleBuffer.put(byteOne);
                    } else {
                        ensembleBuffer.compact();
                        ensembleBuffer.put(byteFour);
                        ensembleBuffer.put(byteThree);
                        ensembleBuffer.put(byteTwo);
                        ensembleBuffer.put(byteOne);
                    }
                    state = 3;
                    break;

                // verify that the header is real, not a random 0x7F7F
                case 3: // find the number of data types in the ensemble
                    // set the numberOfDataTypes byte
                    if (ensembleByteCount == NUMBER_OF_DATA_TYPES_OFFSET - 1) {
                        ensembleByteCount++;
                        ensembleChecksum += (byteOne & 0xFF);
                        numberOfDataTypes = (byteOne & 0xFF);
                        // calculate the number of bytes to the Fixed Leader ID
                        dataTypeOneOffset = 6 + (2 * numberOfDataTypes);
                        if (ensembleBuffer.remaining() > 0) {
                            ensembleBuffer.put(byteOne);
                        } else {
                            ensembleBuffer.compact();
                            ensembleBuffer.put(byteOne);
                        }
                        state = 4;
                        break;
                    } else {
                        ensembleByteCount++;
                        ensembleChecksum += (byteOne & 0xFF);
                        if (ensembleBuffer.remaining() > 0) {
                            ensembleBuffer.put(byteOne);
                        } else {
                            ensembleBuffer.compact();
                            ensembleBuffer.put(byteOne);
                        }
                        break;
                    }

                case 4: // find the offset to data type #1 and verify the header ID
                    if ((ensembleByteCount == dataTypeOneOffset + 1) && byteOne == 0x00 && byteTwo == 0x00) {
                        ensembleByteCount++;
                        ensembleChecksum += (byteOne & 0xFF);
                        // we are confident that the previous sequence of 0x7F7F is truly
                        // a header ID and not a random occurrence in the stream because
                        // we have identified the Fixed Leader ID (0x0000) the correct
                        // number of bytes beyond the 0x7F7F
                        headerIsVerified = true;
                        if (ensembleBuffer.remaining() > 0) {
                            ensembleBuffer.put(byteOne);
                        } else {
                            ensembleBuffer.compact();
                            ensembleBuffer.put(byteOne);
                        }
                        state = 5;
                        break;
                    } else {
                        if (ensembleByteCount > dataTypeOneOffset + 1) {
                            // We've hit a random 0x7F7F byte sequence that is not a true
                            // ensemble header id. Reset the processing and look for the
                            // next 0x7F7F sequence in the stream
                            ensembleByteCount = 0;
                            ensembleChecksum = 0;
                            dataTypeOneOffset = 0;
                            numberOfDataTypes = 0;
                            headerIsVerified = false;
                            ensembleBuffer.clear();
                            rbnbChannelMap.Clear();
                            channelIndex = rbnbChannelMap.Add(getRBNBChannelName());
                            byteOne = 0x00;
                            byteTwo = 0x00;
                            byteThree = 0x00;
                            byteFour = 0x00;
                            state = 0;
                            if (ensembleBuffer.remaining() > 0) {
                                ensembleBuffer.put(byteOne);
                            } else {
                                ensembleBuffer.compact();
                                ensembleBuffer.put(byteOne);
                            }
                            break;
                        } else {
                            // We are still parsing bytes between the purported header ID
                            // and fixed leader ID. Keep parsing until we hit the fixed
                            // leader ID, or until we are greater than the dataTypeOneOffset
                            // stated value.
                            ensembleByteCount++;
                            ensembleChecksum += (byteOne & 0xFF);
                            if (ensembleBuffer.remaining() > 0) {
                                ensembleBuffer.put(byteOne);
                            } else {
                                ensembleBuffer.compact();
                                ensembleBuffer.put(byteOne);
                            }
                            break;
                        }
                    }

                case 5: // read the rest of the bytes to the next Header ID
                    // if we've made it to the next ensemble's header id, prepare to
                    // flush the data. Also check that the calculated byte count
                    // is greater than the recorded byte count in case of finding an
                    // arbitrary 0x7f 0x7f sequence in the data stream
                    if (byteOne == 0x7F && byteTwo == 0x7F && (ensembleByteCount == ensembleBytes + 3)
                            && headerIsVerified) {
                        // remove the last bytes from the count (byteOne and byteTwo)
                        ensembleByteCount -= 1;
                        // remove the last three bytes from the checksum:
                        // the two checksum bytes are not included, and the two 0x7f
                        // bytes belong to the next ensemble, and one of them was
                        // previously added. Reset the buffer position due to this too.
                        ensembleChecksum -= (byteTwo & 0xFF);
                        ensembleChecksum -= (byteThree & 0xFF);
                        ensembleChecksum -= (byteFour & 0xFF);
                        // We are consistently 1 byte over in the checksum. Trim it. We need to
                        // troubleshoot why this is. CSJ 12/18/2007
                        ensembleChecksum = ensembleChecksum - 1;

                        // jockey byteThree into LSB, byteFour into MSB
                        int upperChecksumByte = (byteThree & 0xFF) << 8;
                        int lowerChecksumByte = (byteFour & 0xFF);
                        int trueChecksum = upperChecksumByte + lowerChecksumByte;

                        if (ensembleBuffer.remaining() > 0) {
                            ensembleBuffer.put((byte) lowerChecksumByte);
                            ensembleBuffer.put((byte) (upperChecksumByte >> 8));
                        } else {
                            ensembleBuffer.compact();
                            ensembleBuffer.put((byte) lowerChecksumByte);
                            ensembleBuffer.put((byte) (upperChecksumByte >> 8));
                        }

                        // check if the calculated checksum (modulo 65535) is equal
                        // to the true checksum; if so, flush to the data turbine
                        // Also, if the checksums are off by 1 byte, also flush the
                        // data. We need to troubleshoot this bug CSJ 06/11/2008
                        if (((ensembleChecksum % 65535) == trueChecksum)
                                || ((ensembleChecksum + 1) % 65535 == trueChecksum)
                                || ((ensembleChecksum - 1) % 65535 == trueChecksum)) {

                            // extract just the length of the ensemble bytes out of the
                            // ensemble buffer, and place it in the channel map as a
                            // byte array. Then, send it to the data turbine.
                            byte[] ensembleArray = new byte[ensembleByteCount];
                            ensembleBuffer.flip();
                            ensembleBuffer.get(ensembleArray);

                            // send the ensemble to the data turbine
                            rbnbChannelMap.PutTimeAuto("server");
                            rbnbChannelMap.PutDataAsByteArray(channelIndex, ensembleArray);
                            getSource().Flush(rbnbChannelMap);
                            logger.debug("flushed: " + ensembleByteCount + " " + "ens cksum: "
                                    + ensembleChecksum + "\t\t" + "ens pos: " + ensembleBuffer.position()
                                    + "\t" + "ens rem: " + ensembleBuffer.remaining() + "\t" + "buf pos: "
                                    + buffer.position() + "\t" + "buf rem: " + buffer.remaining() + "\t"
                                    + "state: " + state);
                            logger.info("Sent ADCP ensemble to the data turbine.");

                            // only clear all four bytes if we are not one or two bytes
                            // from the end of the byte buffer (i.e. the header id
                            // is split or is all in the previous buffer)
                            if (byteOne == 0x7f && byteTwo == 0x7f && ensembleByteCount > ensembleBytes
                                    && buffer.position() == 0) {
                                byteThree = 0x00;
                                byteFour = 0x00;
                                logger.debug("Cleared ONLY b3, b4.");
                            } else if (byteOne == 0x7f && ensembleByteCount > ensembleBytes
                                    && buffer.position() == 1) {
                                buffer.position(buffer.position() - 1);
                                byteTwo = 0x00;
                                byteThree = 0x00;
                                byteFour = 0x00;
                                logger.debug("Cleared ONLY b2, b3, b4.");
                            } else {
                                byteOne = 0x00;
                                byteTwo = 0x00;
                                byteThree = 0x00;
                                byteFour = 0x00;
                                logger.debug("Cleared ALL b1, b2, b3, b4.");
                            }

                            // rewind the position to before the next ensemble's header id
                            if (buffer.position() >= 2) {
                                buffer.position(buffer.position() - 2);
                                logger.debug("Moved position back two, now: " + buffer.position());
                            }

                            ensembleBuffer.clear();
                            ensembleByteCount = 0;
                            ensembleBytes = 0;
                            ensembleChecksum = 0;
                            state = 0;
                            break;

                        } else {
                            // The checksums don't match, move on
                            logger.info("not equal: " + "calc chksum: " + (ensembleChecksum % 65535)
                                    + "\tens chksum: " + trueChecksum + "\tbuf pos: " + buffer.position()
                                    + "\tbuf rem: " + buffer.remaining() + "\tens pos: "
                                    + ensembleBuffer.position() + "\tens rem: " + ensembleBuffer.remaining()
                                    + "\tstate: " + state);
                            rbnbChannelMap.Clear();
                            channelIndex = rbnbChannelMap.Add(getRBNBChannelName());
                            ensembleBuffer.clear();
                            ensembleByteCount = 0;
                            ensembleChecksum = 0;
                            ensembleBuffer.clear();
                            state = 0;
                            break;
                        }

                    } else {
                        // still in the middle of the ensemble, keep adding bytes
                        ensembleByteCount++; // add each byte found
                        ensembleChecksum += (byteOne & 0xFF);
                        if (ensembleBuffer.remaining() > 0) {
                            ensembleBuffer.put(byteOne);
                        } else {
                            ensembleBuffer.compact();
                            ensembleBuffer.put(byteOne);
                        }
                        break;
                    }
                }

                // shift the bytes in the FIFO window
                byteFour = byteThree;
                byteThree = byteTwo;
                byteTwo = byteOne;

                logger.debug("remaining:\t" + buffer.remaining() + "\tstate:\t" + state + "\tens byte count:\t"
                        + ensembleByteCount + "\tens bytes:\t" + ensembleBytes + "\tver:\t" + headerIsVerified
                        + "\tbyte value:\t" + new String(Hex.encodeHex((new byte[] { byteOne }))));
            } // end while (more unread bytes)

            // prepare the buffer to read in more bytes from the stream
            buffer.compact();
        } // end while (more socket bytes to read)

        socket.close();

    } catch (IOException e) {
        // In the event of an i/o exception, log the exception, and allow execute()
        // to return false, which will prompt a retry.
        failed = true;
        e.printStackTrace();
        return !failed;
    } catch (SAPIException sapie) {
        // In the event of an RBNB communication exception, log the exception,
        // and allow execute() to return false, which will prompt a retry.
        failed = true;
        sapie.printStackTrace();
        return !failed;
    }
    return !failed;
}
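Buried in this long state machine is the canonical pattern for draining a channel: read() fills the buffer, flip() switches it from filling to draining, get() consumes the bytes that arrived, and compact() shifts any unconsumed tail to the front before the next read. A minimal, self-contained sketch of just that cycle (hypothetical names, none of the ADCP logic):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public final class ChannelDrain {
    // Count every byte a channel delivers using the read/flip/get/compact cycle.
    public static long countBytes(ReadableByteChannel channel) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(4096);
        long total = 0;
        while (channel.read(buffer) != -1 || buffer.position() > 0) {
            buffer.flip();                // drain mode: limit = bytes available
            while (buffer.hasRemaining()) {
                buffer.get();             // a real parser would act on each byte here
                total++;
            }
            buffer.compact();             // fill mode again; unread bytes kept at front
        }
        return total;
    }
}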
From source file:byps.test.TestSerializePrimitiveTypes.java
public void internaltestPerformancePrimitiveTypes(BBinaryModel protocol, int objCount, int loopCount, int flags)
        throws IOException {
    TestUtils.protocol = protocol;
    BTransport transport = TestUtils.createTransport(flags, BMessageHeader.BYPS_VERSION_CURRENT, 0);
    BOutput bout = transport.getOutput();
    long bytesCount = 0;
    ByteBuffer buf = null;

    ListTypes obj = new ListTypes();
    obj.primitiveTypes1 = new ArrayList<PrimitiveTypes>();
    for (int i = 0; i < objCount; i++) {
        PrimitiveTypes item = TestUtils.createObjectPrimitiveTypes();
        obj.primitiveTypes1.add(item);
    }

    long t1 = System.currentTimeMillis();
    ByteArrayOutputStream os = null;
    for (int i = 0; i < loopCount; i++) {
        bout.store(obj);
        buf = bout.toByteBuffer();
        os = new ByteArrayOutputStream();
        BWire.bufferToStream(buf, (transport.getWire().getFlags() & BWire.FLAG_GZIP) != 0, os);
    }
    long t2 = System.currentTimeMillis();

    bytesCount = (long) loopCount * buf.remaining();

    long t3 = System.currentTimeMillis();
    for (int i = 0; i < loopCount; i++) {
        ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray());
        buf = BWire.bufferFromStream(is);
        BInput bin = transport.getInput(null, buf);
        Object objR = bin.load();
        if (i == 0) {
            TestUtils.assertEquals(null, "obj", obj, objR);
        }
        buf.flip();
    }
    long t4 = System.currentTimeMillis();

    log.info("bmodel=" + protocol.toString().substring(0, 2) + ", gzip=" + ((flags & BWire.FLAG_GZIP) != 0)
            + ", #objs=" + String.format("%6d", objCount) + ", #loops=" + String.format("%6d", loopCount)
            + ", ser-ms=" + String.format("%6d", (t2 - t1) + (t4 - t3)) + "/" + String.format("%6d", (t2 - t1))
            + "/" + String.format("%6d", (t4 - t3)) + ", #kb-per-message="
            + String.format("%4.2f", (double) os.size() / 1000) + ", #kb-total="
            + String.format("%4.2f", (double) bytesCount / 1000));
}
From source file:com.healthmarketscience.jackcess.impl.ColumnImpl.java
/**
 * Serialize an Object into a raw byte value for this column
 * @param obj Object to serialize
 * @param order Order in which to serialize
 * @return A buffer containing the bytes
 * @usage _advanced_method_
 */
protected ByteBuffer writeFixedLengthField(Object obj, ByteOrder order) throws IOException {
    int size = getType().getFixedSize(_columnLength);
    ByteBuffer buffer = writeFixedLengthField(obj, PageChannel.createBuffer(size, order));
    buffer.flip();
    return buffer;
}
From source file:org.apache.nifi.processors.standard.TailFile.java
/**
 * Read new lines from the given FileChannel, copying it to the given Output
 * Stream. The Checksum is used in order to later determine whether or not
 * data has been consumed.
 *
 * @param reader The FileChannel to read data from
 * @param buffer the buffer to use for copying data
 * @param out the OutputStream to copy the data to
 * @param checksum the Checksum object to use in order to calculate checksum
 * for recovery purposes
 *
 * @return The new position after the lines have been read
 * @throws java.io.IOException if an I/O error occurs.
 */
private long readLines(final FileChannel reader, final ByteBuffer buffer, final OutputStream out,
        final Checksum checksum) throws IOException {
    getLogger().debug("Reading lines starting at position {}", new Object[] { reader.position() });

    try (final ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
        long pos = reader.position();
        long rePos = pos; // position to re-read

        int num;
        int linesRead = 0;
        boolean seenCR = false;
        buffer.clear();

        while ((num = reader.read(buffer)) != -1) {
            buffer.flip();

            for (int i = 0; i < num; i++) {
                byte ch = buffer.get(i);

                switch (ch) {
                case '\n': {
                    baos.write(ch);
                    seenCR = false;
                    baos.writeTo(out);
                    final byte[] baosBuffer = baos.toByteArray();
                    checksum.update(baosBuffer, 0, baos.size());
                    if (getLogger().isTraceEnabled()) {
                        getLogger().trace("Checksum updated to {}", new Object[] { checksum.getValue() });
                    }
                    baos.reset();
                    rePos = pos + i + 1;
                    linesRead++;
                    break;
                }
                case '\r': {
                    baos.write(ch);
                    seenCR = true;
                    break;
                }
                default: {
                    if (seenCR) {
                        seenCR = false;
                        baos.writeTo(out);
                        final byte[] baosBuffer = baos.toByteArray();
                        checksum.update(baosBuffer, 0, baos.size());
                        if (getLogger().isTraceEnabled()) {
                            getLogger().trace("Checksum updated to {}", new Object[] { checksum.getValue() });
                        }
                        linesRead++;
                        baos.reset();
                        baos.write(ch);
                        rePos = pos + i;
                    } else {
                        baos.write(ch);
                    }
                }
                }
            }

            pos = reader.position();
        }

        if (rePos < reader.position()) {
            getLogger().debug("Read {} lines; repositioning reader from {} to {}",
                    new Object[] { linesRead, pos, rePos });
            reader.position(rePos); // Ensure we can re-read if necessary
        }

        return rePos;
    }
}
From source file:com.healthmarketscience.jackcess.impl.ColumnImpl.java
protected ByteBuffer writeRealData(Object obj, int remainingRowLength, ByteOrder order) throws IOException {
    if (!isVariableLength() || !getType().isVariableLength()) {
        return writeFixedLengthField(obj, order);
    }

    // this is an "inline" var length field
    switch (getType()) {
    case NUMERIC:
        // don't ask me why numerics are "var length" columns...
        ByteBuffer buffer = PageChannel.createBuffer(getType().getFixedSize(), order);
        writeNumericValue(buffer, obj);
        buffer.flip();
        return buffer;

    case TEXT:
        return encodeTextValue(obj, 0, getLengthInUnits(), false).order(order);

    case BINARY:
    case UNKNOWN_0D:
    case UNSUPPORTED_VARLEN:
        // should already be "encoded"
        break;
    default:
        throw new RuntimeException("unexpected inline var length type: " + getType());
    }

    ByteBuffer buffer = ByteBuffer.wrap(toByteArray(obj)).order(order);
    return buffer;
}
From source file:org.apache.hadoop.hbase.io.hfile.HFileBlock.java
/**
 * For use by bucketcache. This exposes internals.
 */
public ByteBuffer getMetaData() {
    ByteBuffer bb = ByteBuffer.allocate(BLOCK_METADATA_SPACE);
    bb = addMetaData(bb);
    bb.flip();
    return bb;
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl.java
private ByteBuffer createByteBufferFromByteArray(byte[] bytes) {
    ByteBuffer buffer = ByteBuffer.allocate(bytes.length);
    for (int i = 0; i < bytes.length; i++) {
        buffer.put(bytes[i]);
    }
    buffer.flip();
    return buffer;
}
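A note on this last example: the per-byte put loop followed by flip() leaves the buffer with position 0 and limit bytes.length. The same end state can be reached without the loop, either by a bulk put() followed by flip(), or by wrapping the array directly; wrap() shares the caller's array instead of copying it. A short sketch of both variants (the class name is hypothetical, not part of the YARN code):

import java.nio.ByteBuffer;

public final class ByteBuffers {
    // Copying variant: bulk put, then flip() to make the data readable.
    public static ByteBuffer copyOf(byte[] bytes) {
        ByteBuffer buffer = ByteBuffer.allocate(bytes.length);
        buffer.put(bytes);
        buffer.flip(); // position = 0, limit = bytes.length
        return buffer;
    }

    // Sharing variant: wrap() starts in the same read-ready state, but the
    // buffer is backed by the caller's array rather than a copy.
    public static ByteBuffer viewOf(byte[] bytes) {
        return ByteBuffer.wrap(bytes);
    }
}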