Usage examples for java.nio.ByteBuffer.limit()
Method signature: public final int limit()
From source file:org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx.java
public void writeData(HdfsDataOutputStream fos) throws IOException { Preconditions.checkState(fos != null); ByteBuffer dataBuffer = null; try {/* w w w . ja v a 2s . c o m*/ dataBuffer = getData(); } catch (Exception e1) { LOG.error("Failed to get request data offset:" + offset + " count:" + count + " error:" + e1); throw new IOException("Can't get WriteCtx.data"); } byte[] data = dataBuffer.array(); int position = dataBuffer.position(); int limit = dataBuffer.limit(); Preconditions.checkState(limit - position == count); // Modified write has a valid original count if (position != 0) { if (limit != getOriginalCount()) { throw new IOException("Modified write has differnt original size." + "buff position:" + position + " buff limit:" + limit + ". " + toString()); } } // Now write data fos.write(data, position, count); }
From source file:org.apache.hadoop.hbase.io.ValueSplitHalfStoreFileReader.java
/**
 * Returns a scanner over this half store file that only surfaces cells whose
 * VALUE falls on this reader's side of {@code splitvalue} (bottom: value <
 * split, top: value >= split). All positioning calls are forwarded to the
 * underlying reader's scanner and then filtered via isCurrentKVValid().
 */
@Override
public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread, final boolean isCompaction) {
    final HFileScanner s = getHFileReader().getScanner(cacheBlocks, pread, isCompaction);
    return new HFileScanner() {
        final HFileScanner delegate = s;

        // Plain accessors: pass straight through to the wrapped scanner.
        @Override
        public ByteBuffer getKey() {
            return delegate.getKey();
        }

        @Override
        public String getKeyString() {
            return delegate.getKeyString();
        }

        @Override
        public ByteBuffer getValue() {
            return delegate.getValue();
        }

        @Override
        public String getValueString() {
            return delegate.getValueString();
        }

        @Override
        public KeyValue getKeyValue() {
            return delegate.getKeyValue();
        }

        // Advance until the next cell that belongs to this half, skipping
        // cells that fail the value filter.
        @Override
        public boolean next() throws IOException {
            while (delegate.next()) {
                if (isCurrentKVValid()) {
                    return true;
                }
            }
            return false;
        }

        @Override
        public boolean seekBefore(byte[] key) throws IOException {
            return seekBefore(key, 0, key.length);
        }

        // Walk backwards: keep seeking before the current key until we land
        // on a cell that passes the value filter (or run out of cells).
        @Override
        public boolean seekBefore(byte[] key, int offset, int length) throws IOException {
            byte[] seekKey = key;
            int seekKeyOffset = offset;
            int seekKeyLength = length;
            while (delegate.seekBefore(seekKey, seekKeyOffset, seekKeyLength)) {
                if (isCurrentKVValid()) {
                    return true;
                }
                // Not valid: retry the seek from the key we just landed on.
                ByteBuffer curKey = getKey();
                if (curKey == null)
                    return false;
                seekKey = curKey.array();
                seekKeyOffset = curKey.arrayOffset();
                seekKeyLength = curKey.limit();
            }
            return false;
        }

        // The filter: bottom half keeps values strictly below the split
        // value; top half keeps values at or above it.
        private boolean isCurrentKVValid() {
            ByteBuffer value = getValue();
            if (!top) {
                // Current value < split key, it belongs to bottom, return true
                if (Bytes.compareTo(value.array(), value.arrayOffset(), value.limit(), splitvalue, 0,
                        splitvalue.length) < 0) {
                    return true;
                }
            } else {
                if (Bytes.compareTo(value.array(), value.arrayOffset(), value.limit(), splitvalue, 0,
                        splitvalue.length) >= 0) {
                    return true;
                }
            }
            return false;
        }

        // Seek to the first cell of the file, then scan forward to the first
        // cell that belongs to this half.
        @Override
        public boolean seekTo() throws IOException {
            boolean b = delegate.seekTo();
            if (!b) {
                return b;
            }
            if (isCurrentKVValid()) {
                return true;
            }
            return next();
        }

        @Override
        public int seekTo(byte[] key)
                throws IOException {
            return seekTo(key, 0, key.length);
        }

        // Seek to `key`; if the landing cell is filtered out, fall back to
        // the nearest preceding valid cell (returning 1), else -1.
        public int seekTo(byte[] key, int offset, int length) throws IOException {
            int b = delegate.seekTo(key, offset, length);
            if (b < 0) {
                return b;
            } else {
                if (isCurrentKVValid()) {
                    return b;
                } else {
                    boolean existBefore = seekBefore(key, offset, length);
                    if (existBefore) {
                        return 1;
                    }
                    return -1;
                }
            }
        }

        @Override
        public int reseekTo(byte[] key) throws IOException {
            return reseekTo(key, 0, key.length);
        }

        // Identical to seekTo(byte[], int, int) except it reseeks (forward
        // only) on the delegate.
        @Override
        public int reseekTo(byte[] key, int offset, int length) throws IOException {
            int b = delegate.reseekTo(key, offset, length);
            if (b < 0) {
                return b;
            } else {
                if (isCurrentKVValid()) {
                    return b;
                } else {
                    boolean existBefore = seekBefore(key, offset, length);
                    if (existBefore) {
                        return 1;
                    }
                    return -1;
                }
            }
        }

        public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() {
            return this.delegate.getReader();
        }

        public boolean isSeeked() {
            return this.delegate.isSeeked();
        }
    };
}
From source file:tachyon.master.RawTables.java
/** * Update the metadata of the specified raw table. It will check if the table exists. * * @param tableId The id of the raw table * @param metadata The new metadata of the raw table * @throws TachyonException//from ww w . j a va 2 s . c o m */ // TODO add version number. public synchronized void updateMetadata(int tableId, ByteBuffer metadata) throws TachyonException { Pair<Integer, ByteBuffer> data = mData.get(tableId); if (null == data) { throw new TachyonException("The raw table " + tableId + " does not exist."); } if (metadata == null) { data.setSecond(ByteBuffer.allocate(0)); } else { long maxVal = mTachyonConf.getBytes(Constants.MAX_TABLE_METADATA_BYTE, 0L); if (metadata.limit() - metadata.position() >= maxVal) { throw new TachyonException("Too big table metadata: " + metadata.toString()); } ByteBuffer tMetadata = ByteBuffer.allocate(metadata.limit() - metadata.position()); tMetadata.put(metadata.array(), metadata.position(), metadata.limit() - metadata.position()); tMetadata.flip(); data.setSecond(tMetadata); } }
From source file:org.darkware.wpman.security.ChecksumDatabase.java
/** * Perform a checksum calculation on the given {@link ReadableByteChannel}. Other code should not create * implementations which are dependant on any particular characteristics of the checksum, but the checksum * is very likely to be based on a cryptographic-strength hash. The results of the checksum are encoded as * a base64 {@code String}./*from www.ja va 2 s . c o m*/ * * @param channel The {@code ReadableByteChannel} to read data from. * @return A Base64 encoded {@code String} representing the checksum. * @throws IOException If there was an error while reading data from the channel. * @see Base64#encodeBase64String(byte[]) */ protected String doChecksum(ReadableByteChannel channel) throws IOException { Hasher hasher = Hashing.sha256().newHasher(); final ByteBuffer block = ByteBuffer.allocate(4096); while (channel.isOpen()) { int bytesRead = channel.read(block); if (bytesRead > 0) { block.flip(); hasher.putBytes(block.array(), 0, block.limit()); block.clear(); } else if (bytesRead == -1) { channel.close(); } } return Base64.encodeBase64String(hasher.hash().asBytes()); }
From source file:bamboo.openhash.fileshare.FileShare.java
/** * Transfer wblocks from the wblocks array to the ready queue. *///from w w w. ja v a 2 s. c o m public void make_parents(boolean done) { for (int l = 0; l < wblocks.size(); ++l) { logger.debug("level " + l + " of " + wblocks.size() + " size=" + wblocks.elementAt(l).size() + " done=" + done); while ((wblocks.elementAt(l).size() >= BRANCHING) || (done && (wblocks.elementAt(l).size() > 1))) { int count = min(BRANCHING, wblocks.elementAt(l).size()); logger.debug("count=" + count); for (int i = 0; i < count; ++i) { ByteBuffer bb = wblocks.elementAt(l).removeFirst(); bb.flip(); md.update(secret); md.update(bb.array(), 0, bb.limit()); byte[] dig = md.digest(); ready.addLast(new Pair<byte[], ByteBuffer>(dig, bb)); if (l + 1 >= wblocks.size()) { wblocks.setSize(max(wblocks.size(), l + 2)); wblocks.setElementAt(new LinkedList<ByteBuffer>(), l + 1); } LinkedList<ByteBuffer> next_level = wblocks.elementAt(l + 1); if (next_level.isEmpty() || (next_level.getLast().position() == 1024)) { logger.debug("adding a new block to level " + (l + 1)); next_level.addLast(ByteBuffer.wrap(new byte[1024])); next_level.getLast().putInt(l + 1); } logger.debug("adding a digest to level " + (l + 1)); next_level.getLast().put(dig); } if (done) break; } } logger.debug("make_parents done"); }
From source file:org.apache.hadoop.hbase.io.HalfStoreFileReader.java
/**
 * Returns a scanner over this half store file that confines the delegate
 * scanner to one side of {@code splitkey}: the bottom half never yields keys
 * at or past the split key, and the top half never positions before it.
 */
@Override
public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread, final boolean isCompaction) {
    final HFileScanner s = super.getScanner(cacheBlocks, pread, isCompaction);
    return new HFileScanner() {
        final HFileScanner delegate = s;
        // Set once the bottom half walks past the split key; after that all
        // accessors report "no current cell".
        public boolean atEnd = false;

        public ByteBuffer getKey() {
            if (atEnd)
                return null;
            return delegate.getKey();
        }

        public String getKeyString() {
            if (atEnd)
                return null;
            return delegate.getKeyString();
        }

        public ByteBuffer getValue() {
            if (atEnd)
                return null;
            return delegate.getValue();
        }

        public String getValueString() {
            if (atEnd)
                return null;
            return delegate.getValueString();
        }

        public Cell getKeyValue() {
            if (atEnd)
                return null;
            return delegate.getKeyValue();
        }

        public boolean next() throws IOException {
            if (atEnd)
                return false;
            boolean b = delegate.next();
            if (!b) {
                return b;
            }
            // constrain the bottom: stop as soon as the delegate reaches a
            // key at or beyond the split key.
            if (!top) {
                ByteBuffer bb = getKey();
                if (getComparator().compareFlatKey(bb.array(), bb.arrayOffset(), bb.limit(), splitkey, 0,
                        splitkey.length) >= 0) {
                    atEnd = true;
                    return false;
                }
            }
            return true;
        }

        @Override
        public boolean seekBefore(byte[] key) throws IOException {
            return seekBefore(key, 0, key.length);
        }

        @Override
        public boolean seekBefore(byte[] key, int offset, int length) throws IOException {
            return seekBefore(new KeyValue.KeyOnlyKeyValue(key, offset, length));
        }

        @Override
        public boolean seekTo() throws IOException {
            if (top) {
                // Top half starts at the split key itself (or just after).
                int r = this.delegate.seekTo(new KeyValue.KeyOnlyKeyValue(splitkey, 0, splitkey.length));
                if (r == HConstants.INDEX_KEY_MAGIC) {
                    return true;
                }
                if (r < 0) {
                    // midkey is < first key in file
                    return this.delegate.seekTo();
                }
                if (r > 0) {
                    // Landed just before the split key; step onto/after it.
                    return this.delegate.next();
                }
                return true;
            }
            boolean b = delegate.seekTo();
            if (!b) {
                return b;
            }
            // Check key: the bottom half only has content if the first key
            // is strictly below the split key.
            ByteBuffer k = this.delegate.getKey();
            return this.delegate.getReader().getComparator().compareFlatKey(k.array(), k.arrayOffset(), k.limit(),
                    splitkey, 0, splitkey.length) < 0;
        }

        @Override
        public int seekTo(byte[] key) throws IOException {
            return seekTo(key, 0, key.length);
        }

        @Override
        public int seekTo(byte[] key, int offset, int length) throws IOException {
            return seekTo(new KeyValue.KeyOnlyKeyValue(key, offset, length));
        }

        @Override
        public int reseekTo(byte[] key) throws IOException {
            return reseekTo(key, 0, key.length);
        }

        @Override
        public int reseekTo(byte[] key, int offset, int length) throws IOException {
            //This function is identical to the corresponding seekTo function except
            //that we call reseekTo (and not seekTo) on the delegate.
            return reseekTo(new KeyValue.KeyOnlyKeyValue(key, offset, length));
        }

        public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() {
            return this.delegate.getReader();
        }

        public boolean isSeeked() {
            return this.delegate.isSeeked();
        }

        @Override
        public int seekTo(Cell key) throws IOException {
            if (top) {
                // Keys below the split belong to the other (bottom) half.
                if (getComparator().compareOnlyKeyPortion(key, splitCell) < 0) {
                    return -1;
                }
            } else {
                if (getComparator().compareOnlyKeyPortion(key, splitCell) >= 0) {
                    // we would place the scanner in the second half.
                    // it might be an error to return false here ever...
                    boolean res = delegate.seekBefore(splitCell);
                    if (!res) {
                        throw new IOException(
                                "Seeking for a key in bottom of file, but key exists in top of file, "
                                        + "failed on seekBefore(midkey)");
                    }
                    return 1;
                }
            }
            return delegate.seekTo(key);
        }

        @Override
        public int reseekTo(Cell key) throws IOException {
            // This function is identical to the corresponding seekTo function
            // except
            // that we call reseekTo (and not seekTo) on the delegate.
            if (top) {
                if (getComparator().compareOnlyKeyPortion(key, splitCell) < 0) {
                    return -1;
                }
            } else {
                if (getComparator().compareOnlyKeyPortion(key, splitCell) >= 0) {
                    // we would place the scanner in the second half.
                    // it might be an error to return false here ever...
                    boolean res = delegate.seekBefore(splitCell);
                    if (!res) {
                        throw new IOException("Seeking for a key in bottom of file, but"
                                + " key exists in top of file, failed on seekBefore(midkey)");
                    }
                    return 1;
                }
            }
            if (atEnd) {
                // skip the 'reseek' and just return 1.
                return 1;
            }
            return delegate.reseekTo(key);
        }

        @Override
        public boolean seekBefore(Cell key) throws IOException {
            if (top) {
                // Nothing precedes the first key of the top half.
                Cell fk = new KeyValue.KeyOnlyKeyValue(getFirstKey(), 0, getFirstKey().length);
                if (getComparator().compareOnlyKeyPortion(key, fk) <= 0) {
                    return false;
                }
            } else {
                // The equals sign isn't strictly necessary just here to be consistent
                // with seekTo
                if (getComparator().compareOnlyKeyPortion(key, splitCell) >= 0) {
                    return this.delegate.seekBefore(splitCell);
                }
            }
            return this.delegate.seekBefore(key);
        }
    };
}
From source file:com.rs.worldserver.io.IOClient.java
/**
 * Copies the readable contents of {@code buffer} into this client's input
 * stream and processes the pending packet(s).
 *
 * <p>NOTE(review): the buffer is flipped here, so callers are expected to
 * pass it in "fill" state (position = bytes written) — confirm against the
 * I/O thread that produces it.
 */
public void read(ByteBuffer buffer) {
    inStream.currentOffset = 0;
    // Switch the buffer from fill mode to drain mode, then copy the
    // readable window [0, limit) into the client's own input buffer.
    buffer.flip();
    buffer.get(inStream.buffer, 0, buffer.limit());
    inStream.length = buffer.limit();
    try {
        try {
            System.out.println("Processing");
            process();
        } catch (Exception e) {
            // Processing failed: log and tear down this client's socket.
            e.printStackTrace();
            Server.getIoThread().destroySocket(Server.getIoThread().socketFor(this), connectedFrom.toString(),
                    true);
        }
    } catch (Exception e) {
        // NOTE(review): failures from the socket-teardown path above are
        // silently swallowed here — presumably deliberate best-effort
        // cleanup, but worth confirming and at least logging.
    }
}
From source file:com.intel.chimera.cipher.Openssl.java
/** * Continues a multiple-part encryption or decryption operation. The data * is encrypted or decrypted, depending on how this cipher was initialized. * <p/>/*from ww w. j a v a 2 s . com*/ * * All <code>input.remaining()</code> bytes starting at * <code>input.position()</code> are processed. The result is stored in * the output buffer. * <p/> * * Upon return, the input buffer's position will be equal to its limit; * its limit will not have changed. The output buffer's position will have * advanced by n, when n is the value returned by this method; the output * buffer's limit will not have changed. * <p/> * * If <code>output.remaining()</code> bytes are insufficient to hold the * result, a <code>ShortBufferException</code> is thrown. * * @param input the input ByteBuffer * @param output the output ByteBuffer * @return int number of bytes stored in <code>output</code> * @throws ShortBufferException if there is insufficient space in the * output buffer */ public int update(ByteBuffer input, ByteBuffer output) throws ShortBufferException { checkState(); Utils.checkArgument(input.isDirect() && output.isDirect(), "Direct buffers are required."); int len = OpensslNative.update(context, input, input.position(), input.remaining(), output, output.position(), output.remaining()); input.position(input.limit()); output.position(output.position() + len); return len; }
From source file:io.Text.java
/**
 * Set to contain the contents of a string.
 *
 * <p>The string is UTF-encoded via {@link #encode}; the backing array and
 * its valid length are taken from the resulting buffer.
 */
public void set(String string) {
    try {
        final ByteBuffer encoded = encode(string, true);
        bytes = encoded.array();
        length = encoded.limit();
    } catch (CharacterCodingException e) {
        // encode(..., true) replaces malformed input, so this is unreachable.
        throw new RuntimeException("Should not have happened " + e.toString());
    }
}
From source file:org.apache.hadoop.hive.ql.exec.persistence.AnalysisBuffer.java
public Row getByRowid(int rowid) { if (rowid > lastrowid || rowid < firstrowid) return null; if (!fileused || rowid >= membuffer.firstrowid) { return membuffer.getByRowid(rowid); } else {// ww w .ja va 2 s . c o m ByteBuffer buf = indexedFile.get(rowid); return deserialize(buf.array(), 0, buf.limit()); } }